Subversion Repositories: KolibriOS

Compare Revisions: Rev 2340 → Rev 2342

/drivers/video/drm/i915/bitmap.c
6,6 → 6,16
#include "intel_drv.h"
#include "bitmap.h"
 
void __attribute__((regparm(1))) destroy_bitmap(bitmap_t *bitmap)
{
/*
*
*
*
*/
__DestroyObject(bitmap);
};
 
extern struct drm_device *main_device;
 
struct hman bm_man;
20,12 → 30,14
};
 
 
int create_bitmap(struct ubitmap *pbitmap, int width, int height)
int create_bitmap(struct ubitmap *pbitmap)
{
struct drm_i915_gem_object *obj;
 
bitmap_t *bitmap;
u32 handle;
u32 width;
u32 height;
u32 size;
u32 pitch;
void *uaddr;
35,15 → 47,24
pbitmap->handle = 0;
pbitmap->data = NULL;
 
width = pbitmap->width;
height = pbitmap->height;
 
if((width==0)||(height==0)||(width>4096)||(height>4096))
goto err1;
 
handle = alloc_handle(&bm_man);
// printf("%s %d\n",__FUNCTION__, handle);
 
if(handle == 0)
goto err1;
 
bitmap = CreateObject(GetPid(), sizeof(*bitmap));
// printf("bitmap %x\n", bitmap);
if( bitmap == NULL)          /* check the allocation before touching the object */
goto err1;
 
bitmap->handle = handle;
bitmap->header.destroy = destroy_bitmap;
bitmap->obj = NULL;
 
52,6 → 73,7
pitch = ALIGN(width*4,64);
 
size = roundup(pitch*height, PAGE_SIZE);
// printf("pitch %d size %d\n", pitch, size);
 
obj = i915_gem_alloc_object(main_device, size);
if (obj == NULL)
88,9 → 110,15
bitmap->gaddr = obj->gtt_offset;
bitmap->uaddr = uaddr;
bitmap->obj = obj;
bitmap->header.destroy = destroy_bitmap;
 
pbitmap->pitch = pitch;
pbitmap->handle = handle;
pbitmap->data = uaddr;
 
// printf("%s handle %d pitch %d gpu %x user %x\n",
// __FUNCTION__, handle, pitch, obj->gtt_offset, uaddr);
 
return 0;
 
err4:
99,7 → 127,7
// drm_gem_object_unreference(&obj->base);
err2:
free_handle(&bm_man, handle);
DestroyObject(bitmap);
__DestroyObject(bitmap);
err1:
return -1;
 
106,67 → 134,6
};
 
 
int create_video(int width, int height, u32_t *outp)
{
struct drm_i915_gem_object *obj;
 
size_t size;
size_t pitch;
void *uaddr;
 
int ret;
 
if((width==0)||(height==0)||(width>4096)||(height>4096))
goto err1;
 
pitch = ALIGN(width*4,64);
 
size = roundup(pitch*height, PAGE_SIZE);
 
obj = i915_gem_alloc_object(main_device, size);
if (obj == NULL)
goto err2;
 
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
goto err3;
 
uaddr = UserAlloc(size);
if( uaddr == NULL)
goto err4;
else
{
u32_t *src, *dst;
int count;
 
#define page_tabs 0xFDC00000 /* really dirty hack */
 
src = (u32_t*)obj->pages;
dst = &((u32_t*)page_tabs)[(u32_t)uaddr >> 12];
count = size/4096;
 
while(count--)
{
*dst++ = (0xFFFFF000 & *src++) | 0x207 ; // map as shared page
};
}
 
outp[0] = obj->gtt_offset;
outp[2] = (u32)uaddr;
outp[3] = pitch;
 
return 0;
 
err4:
// drm_gem_object_unpin;
err3:
// drm_gem_object_unreference(&obj->base);
err2:
err1:
return -1;
};
 
 
int init_hman(struct hman *man, u32 count)
{
u32* data;
/drivers/video/drm/i915/bitmap.h
55,5 → 55,6
void *data;
};
 
int create_bitmap(struct ubitmap *pbitmap, int width, int height);
int create_bitmap(struct ubitmap *pbitmap);
int init_bitmaps();
 
/drivers/video/drm/i915/i915_dma.c
560,11 → 560,14
// if (!IS_I945G(dev) && !IS_I945GM(dev))
// pci_enable_msi(dev->pdev);
 
spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
 
if (IS_MOBILE(dev) || !IS_GEN2(dev))
if (IS_IVYBRIDGE(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
else
dev_priv->num_pipe = 1;
/drivers/video/drm/i915/i915_drm.h
198,6 → 198,8
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
239,6 → 241,8
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
291,6 → 295,7
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
 
typedef struct drm_i915_getparam {
int param;
653,6 → 658,9
__u64 rsvd2;
};
 
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
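A hedged usage sketch (not part of this revision; fd, exec_objects, count and batch_len are placeholders): userspace requests the gen7 SO-offset reset by setting this bit in the flags field of the execbuffer2 ioctl argument declared in this header.

struct drm_i915_gem_execbuffer2 execbuf;

memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = (__u64)(unsigned long)exec_objects; /* placeholder: exec object array */
execbuf.buffer_count = count;
execbuf.batch_len = batch_len;
execbuf.flags |= I915_EXEC_GEN7_SOL_RESET; /* reset SO write offsets before the batch runs */
drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);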
 
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
844,4 → 852,36
__u32 gamma5;
};
 
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
 
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
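As a hedged illustration of the source-keying description above (not code from this revision; fd and plane_id are placeholders), userspace fills the struct and issues the SET_SPRITE_COLORKEY ioctl; for an RGB surface only min_value and channel_mask are consulted.

struct drm_intel_sprite_colorkey ckey;

memset(&ckey, 0, sizeof(ckey));
ckey.plane_id = plane_id; /* placeholder: id of the sprite plane */
ckey.min_value = 0x00ff00ff; /* sprite pixels matching this value through the mask become transparent */
ckey.channel_mask = 0x00ffffff; /* compare the R, G and B channels only */
ckey.flags = I915_SET_COLORKEY_SOURCE;
drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);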
 
#endif /* _I915_DRM_H_ */
/drivers/video/drm/i915/i915_drv.c
53,7 → 53,7
 
unsigned int i915_powersave __read_mostly = 0;
 
unsigned int i915_enable_rc6 __read_mostly = 0;
unsigned int i915_enable_rc6 __read_mostly = -1;
 
unsigned int i915_enable_fbc __read_mostly = 0;
 
66,7 → 66,7
#define PCI_VENDOR_ID_INTEL 0x8086
 
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
.class = PCI_BASE_CLASS_DISPLAY << 16, \
.class_mask = 0xff0000, \
.vendor = 0x8086, \
.device = id, \
277,7 → 277,7
}
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
 
293,6 → 293,22
udelay(10);
}
 
void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
int count;
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
 
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
POSTING_READ(FORCEWAKE_MT);
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
udelay(10);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
301,28 → 317,37
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
// WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
unsigned long irqflags;
 
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
__gen6_gt_force_wake_get(dev_priv);
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (dev_priv->forcewake_count++ == 0)
dev_priv->display.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
 
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
POSTING_READ(FORCEWAKE_MT);
}
 
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
// WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
unsigned long irqflags;
 
if (atomic_dec_and_test(&dev_priv->forcewake_count))
__gen6_gt_force_wake_put(dev_priv);
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (--dev_priv->forcewake_count == 0)
dev_priv->display.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
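
A brief usage sketch of the get/put pair above (an illustration, not code from this revision; dev_priv is a placeholder): a sequence that must keep the GT powered across several register reads takes one forcewake reference, does its reads, then drops it.

u32 ctl, idle;

gen6_gt_force_wake_get(dev_priv); /* takes gt_lock and bumps forcewake_count */
ctl = I915_READ(RING_CTL(RENDER_RING_BASE));
idle = I915_READ(RING_MAX_IDLE(RENDER_RING_BASE));
gen6_gt_force_wake_put(dev_priv); /* drops the reference; the GT may power down again */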
 
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
441,3 → 466,39
}
 
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->display.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->display.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
return val; \
}
 
__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
 
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
/drivers/video/drm/i915/i915_drv.h
105,6 → 105,7
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;
 
struct intel_opregion {
struct opregion_header *header;
124,6 → 125,9
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5
 
struct drm_i915_fence_reg {
struct list_head lru_list;
137,7 → 141,6
u8 slave_addr;
u8 dvo_wiring;
u8 i2c_pin;
u8 i2c_speed;
u8 ddc_pin;
};
 
167,7 → 170,7
u32 instdone1;
u32 seqno;
u64 bbaddr;
u64 fence[16];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_object {
int page_count;
181,7 → 184,7
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
s32 fence_reg:5;
s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
s32 pinned:2;
u32 tiling:2;
u32 dirty:1;
202,11 → 205,15
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
215,6 → 222,8
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
277,7 → 286,13
int relative_constants_mode;
 
void __iomem *regs;
u32 gt_fifo_count;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
 
struct intel_gmbus {
struct i2c_adapter adapter;
328,6 → 343,8
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_acthd_bsd;
uint32_t last_acthd_blt;
uint32_t last_instdone;
uint32_t last_instdone1;
 
341,11 → 358,11
 
/* overlay */
// struct intel_overlay *overlay;
bool sprite_scaling_enabled;
 
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *panel_fixed_mode;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
355,6 → 372,7
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
struct {
int rate;
372,7 → 390,7
// struct notifier_block lid_notifier;
 
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
503,7 → 521,7
u8 saveAR[21];
u8 saveDACMASK;
u8 saveCR[37];
uint64_t saveFENCE[16];
uint64_t saveFENCE[I915_MAX_NUM_FENCES];
u32 saveCURACNTR;
u32 saveCURAPOS;
u32 saveCURABASE;
670,10 → 688,9
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
int panel_t3, panel_t12;
 
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
// wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
 
705,6 → 722,7
 
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
727,8 → 745,6
 
// struct drm_property *broadcast_rgb_property;
// struct drm_property *force_audio_property;
 
atomic_t forcewake_count;
} drm_i915_private_t;
 
enum i915_cache_level {
775,10 → 791,8
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
*
* Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
signed int fence_reg : 5;
signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
 
/**
* Advice: are the backing pages purgeable?
915,7 → 929,7
 
struct drm_i915_file_private {
struct {
// struct spinlock lock;
spinlock_t lock;
struct list_head request_list;
} mm;
};
1159,6 → 1173,9
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
 
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
1255,12 → 1272,10
extern void intel_teardown_gmbus(struct drm_device *dev);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
 
//extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
//{
// return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
//}
 
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
 
/* intel_opregion.c */
1296,11 → 1311,17
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
 
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1349,18 → 1370,7
((reg) != FORCEWAKE))
 
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
gen6_gt_force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
gen6_gt_force_wake_put(dev_priv); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
/* trace_i915_reg_rw(false, reg, val, sizeof(val)); */\
return val; \
}
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
__i915_read(8, b)
__i915_read(16, w)
1369,13 → 1379,8
#undef __i915_read
 
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
/* trace_i915_reg_rw(true, reg, val, sizeof(val));*/ \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__gen6_gt_wait_for_fifo(dev_priv); \
} \
write##y(val, dev_priv->regs + reg); \
}
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
 
__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
/drivers/video/drm/i915/i915_gem.c
246,6 → 246,8
u32 handle;
 
size = roundup(size, PAGE_SIZE);
if (size == 0)
return -EINVAL;
 
/* Allocate the new object */
obj = i915_gem_alloc_object(dev, size);
1565,13 → 1567,6
 
 
 
 
 
 
 
 
 
 
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
1593,7 → 1588,7
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
if (IS_GEN6(dev)) {
if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* On Gen6, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
1787,7 → 1782,7
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
// INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
// i915_gem_retire_work_handler);
/drivers/video/drm/i915/i915_gem_tiling.c
110,7 → 110,10
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
if (INTEL_INFO(dev)->gen >= 5) {
if (INTEL_INFO(dev)->gen >= 6) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
459,14 → 462,9
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
 
if (obj->bit_17 == NULL)
return;
 
483,14 → 481,9
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
 
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
/drivers/video/drm/i915/i915_reg.h
194,6 → 194,13
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
#define MI_SEMAPHORE_REGISTER (1<<18)
#define MI_SEMAPHORE_SYNC_RV (2<<16)
#define MI_SEMAPHORE_SYNC_RB (0<<16)
#define MI_SEMAPHORE_SYNC_VR (0<<16)
#define MI_SEMAPHORE_SYNC_VB (2<<16)
#define MI_SEMAPHORE_SYNC_BR (2<<16)
#define MI_SEMAPHORE_SYNC_BV (0<<16)
#define MI_SEMAPHORE_SYNC_INVALID (1<<0)
/*
* 3D instructions used by the kernel
*/
235,16 → 242,22
#define ASYNC_FLIP (1<<22)
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WC_FLUSH (1<<12)
#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
#define PIPE_CONTROL_ISP_DIS (1<<9)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
#define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */
#define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
#define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1)
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0)
#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
 
 
/*
296,6 → 309,12
#define RING_CTL(base) ((base)+0x3c)
#define RING_SYNC_0(base) ((base)+0x40)
#define RING_SYNC_1(base) ((base)+0x44)
#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
423,6 → 442,7
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
1534,12 → 1554,21
*/
#define PP_READY (1 << 30)
#define PP_SEQUENCE_NONE (0 << 28)
#define PP_SEQUENCE_ON (1 << 28)
#define PP_SEQUENCE_OFF (2 << 28)
#define PP_SEQUENCE_MASK 0x30000000
#define PP_SEQUENCE_POWER_UP (1 << 28)
#define PP_SEQUENCE_POWER_DOWN (2 << 28)
#define PP_SEQUENCE_MASK (3 << 28)
#define PP_SEQUENCE_SHIFT 28
#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
#define PP_SEQUENCE_STATE_MASK 0x0000000f
#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
#define PP_SEQUENCE_STATE_RESET (0xf << 0)
#define PP_CONTROL 0x61204
#define POWER_TARGET_ON (1 << 0)
#define PP_ON_DELAYS 0x61208
2293,6 → 2322,7
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
2416,6 → 2446,7
#define WM0_PIPE_CURSOR_MASK (0x1f)
 
#define WM0_PIPEB_ILK 0x45104
#define WM0_PIPEC_IVB 0x45200
#define WM1_LP_ILK 0x45108
#define WM1_LP_SR_EN (1<<31)
#define WM1_LP_LATENCY_SHIFT 24
2430,6 → 2461,8
#define WM3_LP_ILK 0x45110
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
#define WM2S_LP_IVB 0x45124
#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
 
/* Memory latency timer register */
2554,10 → 2587,18
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
 
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
 
#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
 
#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
 
/* Display A control */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
2638,6 → 2679,140
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
 
/* Sprite A control */
#define _DVSACNTR 0x72180
#define DVS_ENABLE (1<<31)
#define DVS_GAMMA_ENABLE (1<<30)
#define DVS_PIXFORMAT_MASK (3<<25)
#define DVS_FORMAT_YUV422 (0<<25)
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_RGBX (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
#define DVS_YUV_ORDER_YUYV (0<<16)
#define DVS_YUV_ORDER_UYVY (1<<16)
#define DVS_YUV_ORDER_YVYU (2<<16)
#define DVS_YUV_ORDER_VYUY (3<<16)
#define DVS_DEST_KEY (1<<2)
#define DVS_TRICKLE_FEED_DISABLE (1<<14)
#define DVS_TILED (1<<10)
#define _DVSALINOFF 0x72184
#define _DVSASTRIDE 0x72188
#define _DVSAPOS 0x7218c
#define _DVSASIZE 0x72190
#define _DVSAKEYVAL 0x72194
#define _DVSAKEYMSK 0x72198
#define _DVSASURF 0x7219c
#define _DVSAKEYMAXVAL 0x721a0
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
#define _DVSASCALE 0x72204
#define DVS_SCALE_ENABLE (1<<31)
#define DVS_FILTER_MASK (3<<29)
#define DVS_FILTER_MEDIUM (0<<29)
#define DVS_FILTER_ENHANCING (1<<29)
#define DVS_FILTER_SOFTENING (2<<29)
#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
#define _DVSAGAMC 0x72300
 
#define _DVSBCNTR 0x73180
#define _DVSBLINOFF 0x73184
#define _DVSBSTRIDE 0x73188
#define _DVSBPOS 0x7318c
#define _DVSBSIZE 0x73190
#define _DVSBKEYVAL 0x73194
#define _DVSBKEYMSK 0x73198
#define _DVSBSURF 0x7319c
#define _DVSBKEYMAXVAL 0x731a0
#define _DVSBTILEOFF 0x731a4
#define _DVSBSURFLIVE 0x731ac
#define _DVSBSCALE 0x73204
#define _DVSBGAMC 0x73300
 
#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
 
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
#define SPRITE_GAMMA_ENABLE (1<<30)
#define SPRITE_PIXFORMAT_MASK (7<<25)
#define SPRITE_FORMAT_YUV422 (0<<25)
#define SPRITE_FORMAT_RGBX101010 (1<<25)
#define SPRITE_FORMAT_RGBX888 (2<<25)
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
#define SPRITE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
#define SPRITE_YUV_ORDER_YUYV (0<<16)
#define SPRITE_YUV_ORDER_UYVY (1<<16)
#define SPRITE_YUV_ORDER_YVYU (2<<16)
#define SPRITE_YUV_ORDER_VYUY (3<<16)
#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
#define SPRITE_INT_GAMMA_ENABLE (1<<13)
#define SPRITE_TILED (1<<10)
#define SPRITE_DEST_KEY (1<<2)
#define _SPRA_LINOFF 0x70284
#define _SPRA_STRIDE 0x70288
#define _SPRA_POS 0x7028c
#define _SPRA_SIZE 0x70290
#define _SPRA_KEYVAL 0x70294
#define _SPRA_KEYMSK 0x70298
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
#define SPRITE_FILTER_MEDIUM (0<<29)
#define SPRITE_FILTER_ENHANCING (1<<29)
#define SPRITE_FILTER_SOFTENING (2<<29)
#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
#define _SPRA_GAMC 0x70400
 
#define _SPRB_CTL 0x71280
#define _SPRB_LINOFF 0x71284
#define _SPRB_STRIDE 0x71288
#define _SPRB_POS 0x7128c
#define _SPRB_SIZE 0x71290
#define _SPRB_KEYVAL 0x71294
#define _SPRB_KEYMSK 0x71298
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
 
#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
 
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
2845,6 → 3020,10
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
 
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
 
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
2903,12 → 3082,13
#define SDEIER 0xc400c
 
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
#define PORTD_HOTPLUG_ENABLE (1 << 20)
#define PORTD_PULSE_DURATION_2ms (0)
#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
2917,6 → 3097,7
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
2925,6 → 3106,7
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
2945,7 → 3127,7
 
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
2952,8 → 3134,8
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
#define PCH_DPLL_TEST 0xc606c
 
3167,6 → 3349,7
#define FDI_LINK_TRAIN_NONE_IVB (3<<8)
 
/* both Tx and Rx */
#define FDI_COMPOSITE_SYNC (1<<11)
#define FDI_LINK_TRAIN_AUTO (1<<10)
#define FDI_SCRAMBLING_ENABLE (0<<7)
#define FDI_SCRAMBLING_DISABLE (1<<7)
3262,10 → 3445,10
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_CPT(pipe) ((pipe) << 29)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
3308,6 → 3491,7
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
#define PANEL_UNLOCK_MASK (0xffff << 16)
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
3314,9 → 3498,28
#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define PCH_PP_ON_DELAYS 0xc7208
#define PANEL_PORT_SELECT_MASK (3 << 30)
#define PANEL_PORT_SELECT_LVDS (0 << 30)
#define PANEL_PORT_SELECT_DPA (1 << 30)
#define EDP_PANEL (1 << 30)
#define PANEL_PORT_SELECT_DPC (2 << 30)
#define PANEL_PORT_SELECT_DPD (3 << 30)
#define PANEL_POWER_UP_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_UP_DELAY_SHIFT 16
#define PANEL_LIGHT_ON_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
 
#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
 
#define PCH_PP_DIVISOR 0xc7210
#define PP_REFERENCE_DIVIDER_MASK (0xffffff00)
#define PP_REFERENCE_DIVIDER_SHIFT 8
#define PANEL_POWER_CYCLE_DELAY_MASK (0x1f)
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
 
#define PCH_DP_B 0xe4100
#define PCH_DPB_AUX_CH_CTL 0xe4110
3386,12 → 3589,38
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
 
/* IVB */
#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
 
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
 
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
 
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
 
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
#define GEN6_UCGCTL2 0x9404
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
3413,7 → 3642,11
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
#define GEN6_RP_MEDIA_HW_MODE (1<<9)
#define GEN6_RP_MEDIA_SW_MODE (0<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
3470,4 → 3703,43
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
 
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
#define GEN6_RC0 0
#define GEN6_RC3 2
#define GEN6_RC6 3
#define GEN6_RC7 4
 
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
 
#define G4X_AUD_CNTL_ST 0x620B4
#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
#define G4X_ELDV_DEVCTG (1 << 14)
#define G4X_ELD_ADDR (0xf << 5)
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
 
#define IBX_HDMIW_HDMIEDID_A 0xE2050
#define IBX_AUD_CNTL_ST_A 0xE20B4
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
#define IBX_ELD_VALIDB (1 << 0)
#define IBX_CP_READYB (1 << 1)
 
#define CPT_HDMIW_HDMIEDID_A 0xE5050
#define CPT_AUD_CNTL_ST_A 0xE50B4
#define CPT_AUD_CNTRL_ST2 0xE50C0
 
/* These are the 4 32-bit write offset registers for each stream
* output buffer. It determines the offset from the
* 3DSTATE_SO_BUFFERs that the next streamed vertex output goes to.
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
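For example, GEN7_SO_WRITE_OFFSET(2) evaluates to 0x5288, the write offset register for stream output buffer 2.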
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/intel_bios.c
309,6 → 309,13
dev_priv->lvds_use_ssc = general->enable_ssc;
dev_priv->lvds_ssc_freq =
intel_bios_ssc_frequency(dev, general->ssc_freq);
dev_priv->display_clock_mode = general->display_clock_mode;
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
dev_priv->int_tv_support,
dev_priv->int_crt_support,
dev_priv->lvds_use_ssc,
dev_priv->lvds_ssc_freq,
dev_priv->display_clock_mode);
}
}
 
396,15 → 403,13
p_mapping->dvo_wiring = p_child->dvo_wiring;
p_mapping->ddc_pin = p_child->ddc_pin;
p_mapping->i2c_pin = p_child->i2c_pin;
p_mapping->i2c_speed = p_child->i2c_speed;
p_mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
p_mapping->slave_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin,
p_mapping->i2c_speed);
p_mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
610,7 → 615,7
/* Default to using SSC */
dev_priv->lvds_use_ssc = 1;
dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
 
/* eDP data */
dev_priv->edp.bpp = 18;
639,7 → 644,7
if (dev_priv->opregion.vbt) {
struct vbt_header *vbt = dev_priv->opregion.vbt;
if (memcmp(vbt->signature, "$VBT", 4) == 0) {
DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
vbt->signature);
bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
} else
/drivers/video/drm/i915/intel_bios.h
120,7 → 120,9
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
u8 rsvd8:3; /* finish byte */
u8 rsvd7:1;
u8 display_clock_mode:1;
u8 rsvd8:1; /* finish byte */
 
/* bits 3 */
u8 disable_smooth_vision:1;
133,7 → 135,10
/* bits 5 */
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 rsvd11:6; /* finish byte */
u8 int_efp_support:1;
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
} __attribute__((packed));
 
/* pre-915 */
197,8 → 202,7
struct child_device_config {
u16 handle;
u16 device_type;
u8 i2c_speed;
u8 rsvd[9];
u8 device_id[10]; /* ascii string */
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
446,11 → 450,11
#define EDP_VSWING_1_2V 3
 
struct edp_power_seq {
u16 t3;
u16 t7;
u16 t1_t3;
u16 t8;
u16 t9;
u16 t10;
u16 t12;
u16 t11_t12;
} __attribute__ ((packed));
 
struct edp_link_params {
463,8 → 467,12
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
struct edp_link_params link_params[16];
u32 sdrrs_msa_timing_delay;
struct edp_link_params link_params[16];
 
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
} __attribute__ ((packed));
 
void intel_setup_bios(struct drm_device *dev);
/drivers/video/drm/i915/intel_crt.c
152,17 → 152,13
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
if (intel_crtc->pipe == 0) {
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
}
 
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
/drivers/video/drm/i915/intel_display.c
31,6 → 31,7
#include <linux/kernel.h>
#include <linux/slab.h>
//#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
833,6 → 834,19
u32 val;
bool cur_state;
 
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
 
pch_dpll = I915_READ(PCH_DPLL_SEL);
 
/* Make sure the selected PLL is enabled to the transcoder */
WARN(!((pch_dpll >> (4 * pipe)) & 8),
"transcoder %d PLL not enabled\n", pipe);
 
/* Convert the transcoder pipe number to a pll pipe number */
pipe = (pch_dpll >> (4 * pipe)) & 1;
}
 
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
932,7 → 946,7
pipe_name(pipe));
}
 
static void assert_pipe(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
946,8 → 960,6
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
1202,6 → 1214,9
int reg;
u32 val;
 
if (pipe > 1)
return;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
 
1220,8 → 1235,12
enum pipe pipe)
{
int reg;
u32 val;
u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
pll_sel = TRANSC_DPLL_ENABLE;
 
if (pipe > 1)
return;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
 
1228,6 → 1247,15
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
 
if (pipe == 0)
pll_sel |= TRANSC_DPLLA_SEL;
else if (pipe == 1)
pll_sel |= TRANSC_DPLLB_SEL;
 
 
if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
return;
 
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
1287,7 → 1315,7
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder\n");
DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
 
/**
1522,8 → 1550,8
u32 fbc_ctl, fbc_ctl2;
 
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitch < cfb_pitch)
cfb_pitch = fb->pitch;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
1787,6 → 1815,7
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
 
DRM_DEBUG_KMS("\n");
 
1827,8 → 1856,15
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
if (!i915_enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
enable_fbc = i915_enable_fbc;
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 5)
enable_fbc = 0;
}
if (!enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
2033,11 → 2069,11
I915_WRITE(reg, dspcntr);
 
Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
I915_WRITE(DSPSTRIDE(plane), fb->pitch);
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2065,6 → 2101,7
switch (plane) {
case 0:
case 1:
case 2:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2113,11 → 2150,11
I915_WRITE(reg, dspcntr);
 
Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
I915_WRITE(DSPSTRIDE(plane), fb->pitch);
Start, Offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
2158,7 → 2195,7
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret = 0;
int ret;
 
ENTER();
 
2172,6 → 2209,10
case 0:
case 1:
break;
case 2:
if (IS_IVYBRIDGE(dev))
break;
/* fall through otherwise */
default:
DRM_ERROR("no plane for crtc\n");
return -EINVAL;
2570,6 → 2611,7
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
reg = FDI_RX_CTL(pipe);
2577,6 → 2619,7
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
POSTING_READ(reg);
2836,7 → 2879,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
u32 reg, temp, transc_sel;
 
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
2844,12 → 2887,21
intel_enable_pch_pll(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
TRANSC_DPLLB_SEL;
 
/* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
if (pipe == 0) {
temp &= ~(TRANSA_DPLLB_SEL);
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
} else if (pipe == 1) {
temp &= ~(TRANSB_DPLLB_SEL);
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
} else if (pipe == 2) {
temp &= ~(TRANSC_DPLLB_SEL);
temp |= (TRANSC_DPLL_ENABLE | transc_sel);
}
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
2867,7 → 2919,8
 
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
2905,6 → 2958,24
intel_enable_transcoder(dev_priv, pipe);
}
 
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
u32 temp;
 
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
/* Without this, mode sets may fail silently on FDI */
I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
udelay(250);
I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3017,13 → 3088,13
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
/* FIXME: manage transcoder PLLs? */
/* C shares PLL A or B */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
3033,6 → 3104,7
}
 
/* disable PCH DPLL */
if (!intel_crtc->no_pll)
intel_disable_pch_pll(dev_priv, pipe);
 
/* Switch from PCDclk to Rawclk */
3281,8 → 3353,15
void intel_encoder_commit (struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
4424,7 → 4503,7
*/
}
 
static void sandybridge_update_wm(struct drm_device *dev)
void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4458,6 → 4537,20
enabled |= 2;
}
 
/* IVB has 3 pipes */
if (IS_IVYBRIDGE(dev) &&
g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEC_IVB,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 3;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
4472,16 → 4565,11
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled))
{
LEAVE();
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
};
 
enabled = ffs(enabled) - 1;
 
dbgprintf("compute wm1\n");
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
4497,8 → 4585,6
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
dbgprintf("compute wm2\n");
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
4514,8 → 4600,6
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
dbgprintf("compute wm3\n");
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
4530,11 → 4614,151
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
LEAVE();
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int display_latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
int clock;
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
*sprite_wm = display->guard_size;
return false;
}
 
clock = crtc->mode.clock;
 
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size -
sprite_width * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*sprite_wm = entries + display->guard_size;
if (*sprite_wm > (int)display->max_wm)
*sprite_wm = display->max_wm;
 
return true;
}
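
As a worked example of the small-buffer method above (hypothetical numbers): with a 148500 kHz pixel clock, 4 bytes per pixel and a 700 ns latency, entries = ((148500 * 4 / 1000) * 700) / 1000 = 415 bytes; a positive tlb_miss term is then added, the total is rounded up to display cachelines, and guard_size is added to give the sprite watermark, clamped to max_wm.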
 
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*sprite_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
clock = crtc->mode.clock;
 
line_time_us = (sprite_width * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = sprite_width * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*sprite_wm = entries + display->guard_size;
 
return *sprite_wm > 0x3ff ? false : true;
}
 
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int sprite_wm, reg;
int ret;
 
switch (pipe) {
case 0:
reg = WM0_PIPEA_ILK;
break;
case 1:
reg = WM0_PIPEB_ILK;
break;
case 2:
reg = WM0_PIPEC_IVB;
break;
default:
return; /* bad pipe */
}
 
ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
pipe);
return;
}
 
I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
 
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM1_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
 
/* Only IVB has two more LP watermarks for sprite */
if (!IS_IVYBRIDGE(dev))
return;
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM2_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM3_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
 
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
4576,9 → 4800,21
LEAVE();
}
 
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
pixel_size);
}
 
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
return dev_priv->lvds_use_ssc && i915_panel_use_ssc
if (i915_panel_use_ssc >= 0)
return i915_panel_use_ssc != 0;
return dev_priv->lvds_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
4585,6 → 4821,7
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
* @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
4596,6 → 4833,7
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
* DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
4602,7 → 4840,8
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int *pipe_bpp)
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
4627,7 → 4866,7
lvds_bpc = 6;
 
if (lvds_bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
display_bpc = lvds_bpc;
}
continue;
4638,7 → 4877,7
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
if (edp_bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
4653,7 → 4892,7
/* Don't use an invalid EDID bpc value */
if (connector->display_info.bpc &&
connector->display_info.bpc < display_bpc) {
DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
display_bpc = connector->display_info.bpc;
}
}
4664,15 → 4903,20
*/
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
if (display_bpc > 8 && display_bpc < 12) {
DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
display_bpc = 12;
} else {
DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
display_bpc = 8;
}
}
}
 
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
display_bpc = 6;
}
 
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
4689,13 → 4933,13
bpc = 6; /* min is 18bpp */
break;
case 24:
bpc = min((unsigned int)8, display_bpc);
bpc = 8;
break;
case 30:
bpc = min((unsigned int)10, display_bpc);
bpc = 10;
break;
case 48:
bpc = min((unsigned int)12, display_bpc);
bpc = 12;
break;
default:
DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4703,10 → 4947,12
break;
}
 
DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
display_bpc = min(display_bpc, bpc);
 
DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
bpc, display_bpc);
 
*pipe_bpp = bpc * 3;
*pipe_bpp = display_bpc * 3;
 
return display_bpc != bpc;
}
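A minimal standalone sketch of the clamping order implemented above, under assumed inputs (the real function walks the connector list and also handles the HDMI 8/12 bpc cases); all names below are hypothetical, not driver API. It shows how the sink-reported bpc and the framebuffer depth combine into the final pipe bpp and the dithering decision.

/* Illustrative only: single-connector version of the clamp in
 * intel_choose_pipe_bpp_dither(). */
#include <stdbool.h>
#include <stdio.h>

static bool choose_pipe_bpp_dither_sketch(unsigned int fb_depth,
                                          unsigned int sink_bpc,
                                          bool force_6bpc,
                                          unsigned int *pipe_bpp)
{
	unsigned int display_bpc = sink_bpc; /* what the display can accept */
	unsigned int fb_bpc;

	if (force_6bpc)                      /* e.g. INTEL_MODE_DP_FORCE_6BPC */
		display_bpc = 6;

	switch (fb_depth) {                  /* framebuffer caps the useful bpc */
	case 18: fb_bpc = 6;  break;
	case 24: fb_bpc = 8;  break;
	case 30: fb_bpc = 10; break;
	case 48: fb_bpc = 12; break;
	default: fb_bpc = 8;  break;        /* unsupported depth, assume 24 bits */
	}

	if (display_bpc > fb_bpc)
		display_bpc = fb_bpc;

	*pipe_bpp = display_bpc * 3;

	/* dither when the pipe carries fewer bits than the framebuffer holds */
	return display_bpc != fb_bpc;
}

int main(void)
{
	unsigned int bpp;
	bool dither = choose_pipe_bpp_dither_sketch(24, 6, false, &bpp);

	printf("pipe_bpp=%u dither=%d\n", bpp, dither); /* pipe_bpp=18 dither=1 */
	return 0;
}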
4932,6 → 5178,16
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
 
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}
 
dpll |= DPLL_VCO_ENABLE;
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5050,7 → 5306,7
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
5099,36 → 5355,52
return ret;
}
 
static void ironlake_update_pch_refclk(struct drm_device *dev)
/*
* Initialize reference clocks when the driver loads
*/
void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_crtc *crtc;
struct intel_encoder *encoder;
struct intel_encoder *has_edp_encoder = NULL;
u32 temp;
bool has_lvds = false;
bool has_cpu_edp = false;
bool has_pch_edp = false;
bool has_panel = false;
bool has_ck505 = false;
bool can_ssc = false;
 
/* We need to take the global config into account */
list_for_each_entry(crtc, &mode_config->crtc_list, head) {
if (!crtc->enabled)
continue;
 
list_for_each_entry(encoder, &mode_config->encoder_list,
base.head) {
if (encoder->base.crtc != crtc)
continue;
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
has_lvds = true;
break;
case INTEL_OUTPUT_EDP:
has_edp_encoder = encoder;
has_panel = true;
if (intel_encoder_is_pch_edp(&encoder->base))
has_pch_edp = true;
else
has_cpu_edp = true;
break;
}
}
 
if (HAS_PCH_IBX(dev)) {
has_ck505 = dev_priv->display_clock_mode;
can_ssc = has_ck505;
} else {
has_ck505 = false;
can_ssc = true;
}
 
DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
has_panel, has_lvds, has_pch_edp, has_cpu_edp,
has_ck505);
 
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
5137,43 → 5409,102
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
 
if (has_ck505)
temp |= DREF_NONSPREAD_CK505_ENABLE;
else
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
 
if (has_panel) {
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
 
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
temp |= DREF_SSC1_ENABLE;
}
 
if (has_edp_encoder) {
if (intel_panel_use_ssc(dev_priv)) {
temp |= DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
 
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
 
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
/* Enable CPU source on CPU attached eDP */
if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (intel_panel_use_ssc(dev_priv))
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on eDP\n");
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
}
else
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
} else {
/* Enable SSC on PCH eDP if needed */
if (intel_panel_use_ssc(dev_priv)) {
DRM_ERROR("enabling SSC on PCH\n");
temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
}
}
DRM_DEBUG_KMS("Disabling SSC entirely\n");
 
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
/* Turn off CPU output */
temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
 
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
 
/* Turn off the SSC source */
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_DISABLE;
 
/* Turn off SSC1 */
temp &= ~ DREF_SSC1_ENABLE;
 
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
}
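The reference-clock setup above reduces to a small decision table. The sketch below restates that table with plain booleans; it is a simplified model under assumed semantics (the real code programs PCH_DREF_CONTROL in stages with udelay(200) settle times), and none of the names are driver API.

/* Simplified model of the ironlake_init_pch_refclk() decisions. */
#include <stdio.h>

struct refclk_decision {
	int ck505_nonspread;  /* IBX routed through an external CK505 clock chip */
	int ssc_source_on;    /* SSC reference source enabled for panels */
	int ssc1_on;          /* spread-spectrum actually requested and usable */
	int cpu_source;       /* 0 = disabled, 1 = nonspread, 2 = downspread */
};

static struct refclk_decision decide_refclk(int has_panel, int has_cpu_edp,
                                            int has_ck505, int can_ssc,
                                            int want_ssc)
{
	struct refclk_decision d = {0};

	d.ck505_nonspread = has_ck505;

	if (has_panel) {
		d.ssc_source_on = 1;
		d.ssc1_on = want_ssc && can_ssc;
		if (has_cpu_edp)
			d.cpu_source = d.ssc1_on ? 2 : 1;
	}
	/* no panel at all: CPU output, SSC source and SSC1 all stay off,
	 * matching the "Disabling SSC entirely" branch above */

	return d;
}

int main(void)
{
	/* LVDS panel plus CPU eDP on a board that wants SSC */
	struct refclk_decision d = decide_refclk(1, 1, 0, 1, 1);

	printf("ssc_source=%d ssc1=%d cpu_source=%d\n",
	       d.ssc_source_on, d.ssc1_on, d.cpu_source);
	return 0;
}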
 
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_EDP:
edp_encoder = encoder;
break;
}
num_connectors++;
}
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
dev_priv->lvds_ssc_freq);
return dev_priv->lvds_ssc_freq * 1000;
}
 
return 120000;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
5235,16 → 5566,7
num_connectors++;
}
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else {
refclk = 96000;
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base))
refclk = 120000; /* 120Mhz refclk */
}
refclk = ironlake_get_refclk(crtc);
 
/*
* Returns a set of divisors for the desired target clock with the given
5329,7 → 5651,7
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
5371,8 → 5693,6
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
 
ironlake_update_pch_refclk(dev);
 
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5444,11 → 5764,13
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
 
/* PCH eDP needs FDI, but CPU eDP does not */
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (!intel_crtc->no_pll) {
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(PCH_FP0(pipe), fp);
I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 
5455,28 → 5777,19
POSTING_READ(PCH_DPLL(pipe));
udelay(150);
}
 
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
break;
case 1:
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
break;
case 2:
/* FIXME: manage transcoder PLLs? */
temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
break;
default:
BUG();
} else {
if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
fp == I915_READ(PCH_FP0(0))) {
intel_crtc->use_pll_a = true;
DRM_DEBUG_KMS("using pipe a dpll\n");
} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
fp == I915_READ(PCH_FP0(1))) {
intel_crtc->use_pll_a = false;
DRM_DEBUG_KMS("using pipe b dpll\n");
} else {
DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
return -EINVAL;
}
I915_WRITE(PCH_DPLL_SEL, temp);
 
POSTING_READ(PCH_DPLL_SEL);
udelay(150);
}
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5486,17 → 5799,16
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
if (HAS_PCH_CPT(dev))
temp |= PORT_TRANS_B_SEL_CPT;
else
if (HAS_PCH_CPT(dev)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
if (pipe == 1)
temp |= LVDS_PIPEB_SELECT;
} else {
if (HAS_PCH_CPT(dev))
temp &= ~PORT_TRANS_SEL_MASK;
else
temp &= ~LVDS_PIPEB_SELECT;
}
 
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
5534,7 → 5846,7
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
5546,8 → 5858,9
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
 
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (!intel_crtc->no_pll &&
(!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
I915_WRITE(PCH_DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
5563,6 → 5876,7
}
 
intel_crtc->lowfreq_avail = false;
if (!intel_crtc->no_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(PCH_FP1(pipe), fp2);
intel_crtc->lowfreq_avail = true;
5577,6 → 5891,7
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5677,6 → 5992,172
return ret;
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
int reg_edid)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t i;
 
i = I915_READ(reg_eldv);
i &= bits_eldv;
 
if (!eld[0])
return !i;
 
if (!i)
return false;
 
i = I915_READ(reg_elda);
i &= ~bits_elda;
I915_WRITE(reg_elda, i);
 
for (i = 0; i < eld[2]; i++)
if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
return false;
 
return true;
}
 
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t len;
uint32_t i;
 
i = I915_READ(G4X_AUD_VID_DID);
 
if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
 
if (intel_eld_uptodate(connector,
G4X_AUD_CNTL_ST, eldv,
G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
G4X_HDMIW_HDMIEDID))
return;
 
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
I915_WRITE(G4X_AUD_CNTL_ST, i);
 
if (!eld[0])
return;
 
len = min_t(uint8_t, eld[2], len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
 
i = I915_READ(G4X_AUD_CNTL_ST);
i |= eldv;
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
 
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t i;
int len;
int hdmiw_hdmiedid;
int aud_cntl_st;
int aud_cntrl_st2;
 
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
aud_cntl_st = IBX_AUD_CNTL_ST_A;
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
aud_cntl_st = CPT_AUD_CNTL_ST_A;
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
i = to_intel_crtc(crtc)->pipe;
hdmiw_hdmiedid += i * 0x100;
aud_cntl_st += i * 0x100;
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
 
i = I915_READ(aud_cntl_st);
i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
eldv = IBX_ELD_VALIDB;
eldv |= IBX_ELD_VALIDB << 4;
eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
}
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
aud_cntl_st, IBX_ELD_ADDRESS,
hdmiw_hdmiedid))
return;
 
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
 
if (!eld[0])
return;
 
i = I915_READ(aud_cntl_st);
i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
 
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 
i = I915_READ(aud_cntrl_st2);
i |= eldv;
I915_WRITE(aud_cntrl_st2, i);
}
 
void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_crtc *crtc = encoder->crtc;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
connector = drm_select_eld(encoder, mode);
if (!connector)
return;
 
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
drm_get_connector_name(connector),
connector->encoder->base.id,
drm_get_encoder_name(connector->encoder));
 
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
if (dev_priv->display.write_eld)
dev_priv->display.write_eld(connector, crtc);
}
 
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
6398,6 → 6879,8
intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
if (HAS_PCH_SPLIT(dev)) {
if (pipe == 2 && IS_IVYBRIDGE(dev))
intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
6559,6 → 7042,9
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(dev);
 
if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);
 
LEAVE();
}
 
6573,15 → 7059,6
 
 
 
 
 
 
 
 
 
 
 
 
static const struct drm_framebuffer_funcs intel_fb_funcs = {
// .destroy = intel_user_framebuffer_destroy,
// .create_handle = intel_user_framebuffer_create_handle,
6589,7 → 7066,7
 
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
6597,21 → 7074,25
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
 
if (mode_cmd->pitch & 63)
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
 
switch (mode_cmd->bpp) {
case 8:
case 16:
/* Only pre-ILK can handle 5:5:5 */
if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
return -EINVAL;
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
/* RGB formats are common across chipsets */
break;
 
case 24:
case 32:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
break;
default:
DRM_ERROR("unsupported pixel format\n");
return -EINVAL;
}
 
6820,6 → 7301,31
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
 
static bool intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
*/
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
 
/*
* Disable RC6 on Ironlake
*/
if (INTEL_INFO(dev)->gen == 5)
return 0;
 
/*
* Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
return 0;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
}
 
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
6856,7 → 7362,7
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
if (i915_enable_rc6)
if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
 
6883,7 → 7389,7
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_HW_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
7082,6 → 7588,20
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
7124,6 → 7644,10
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
7291,7 → 7815,7
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!i915_enable_rc6)
if (!intel_enable_rc6(dev))
return;
 
mutex_lock(&dev->struct_mutex);
7412,6 → 7936,34
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
 
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
/* A small trick here - if the bios hasn't configured MT forcewake,
* and if the device is in RC6, then force_wake_mt_get will not wake
* the device and the ECOBUS read will return zero. Which will be
* (correctly) interpreted by the test below as MT forcewake being
* disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->display.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->display.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}
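The ECOBUS probe above is easy to misread; here is the same idea as a standalone sketch with stub register accessors (all names and the bit value are made up, only the shape of the probe is taken from the code): grab MT forcewake once, read the status register back, release, and only switch to the MT callbacks if the enable bit actually stuck.

/* Probe-then-fallback pattern used for IVB multi-threaded forcewake. */
#include <stdint.h>
#include <stdio.h>

#define MT_ENABLE_BIT (1u << 5)  /* stand-in for FORCEWAKE_MT_ENABLE */

static void mt_forcewake_get(void) { /* would write the MT forcewake register */ }
static void mt_forcewake_put(void) { /* would release it again */ }

static uint32_t read_ecobus(void)
{
	/* Returns 0 if the BIOS never configured MT forcewake and the GT is
	 * in RC6, which the caller interprets as "MT forcewake disabled". */
	return MT_ENABLE_BIT; /* pretend the BIOS did set it up */
}

int main(void)
{
	uint32_t ecobus;

	mt_forcewake_get();      /* wake the GT so the readback is meaningful */
	ecobus = read_ecobus();
	mt_forcewake_put();

	if (ecobus & MT_ENABLE_BIT)
		printf("using MT forcewake callbacks\n");
	else
		printf("keeping the legacy forcewake callbacks\n");
	return 0;
}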
 
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
7427,9 → 7979,11
}
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
7437,11 → 7991,13
}
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
7448,7 → 8004,7
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 
dev_priv->display.write_eld = ironlake_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
7468,6 → 8024,7
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
7626,7 → 8183,7
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
int i, ret;
 
drm_mode_config_init(dev);
 
7656,6 → 8213,9
 
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
ret = intel_plane_init(dev, i);
if (ret)
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
 
/* Just disable it once at startup */
/drivers/video/drm/i915/intel_dp.c
36,7 → 36,7
#include "i915_drv.h"
#include "drm_dp_helper.h"
 
 
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
53,12 → 53,18
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[8];
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
uint8_t link_status[DP_LINK_STATUS_SIZE];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
bool want_panel_vdd;
};
 
/**
86,6 → 92,17
return intel_dp->is_pch_edp;
}
 
/**
* is_cpu_edp - is the port on the CPU and attached to an eDP panel?
* @intel_dp: DP struct
*
* Returns true if the given DP struct corresponds to a CPU eDP port.
*/
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
 
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dp, base.base);
136,10 → 153,7
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = 4;
 
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
146,7 → 160,6
default:
max_lane_count = 4;
}
}
return max_lane_count;
}
 
175,18 → 188,36
return 162000;
}
 
/* I think this is a fiction */
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
*
* intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
*
* 270000 * 1 * 8 / 10 == 216000
*
* The actual data capacity of that configuration is 2.16Gbit/s, so the
* units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
* or equivalently, kilopixels per second - so for 1680x1050R it'd be
* 119000. At 18bpp that's 2142000 kilobits per second.
*
* Thus the strange-looking division by 10 in intel_dp_link_required, to
* get the result in decakilobits instead of kilobits.
*/
 
static int
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
 
if (intel_crtc)
if (check_bpp)
bpp = check_bpp;
else if (intel_crtc)
bpp = intel_crtc->bpp;
 
return (pixel_clock * bpp + 7) / 8;
return (pixel_clock * bpp + 9) / 10;
}
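A quick standalone check of the unit bookkeeping in the comment above (helper names here are stand-ins; only the arithmetic comes from the code): a 1680x1050 reduced-blanking mode at 18bpp just fits on a single 2.7GHz lane, while the same mode at 24bpp does not, which is the situation the new INTEL_MODE_DP_FORCE_6BPC fallback is meant to catch.

/* Decakilobit arithmetic from the comment above, as a self-contained sketch. */
#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;  /* decakilobits per second */
}

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10; /* 8b/10b channel coding overhead */
}

int main(void)
{
	int avail = max_data_rate(270000, 1);     /* one 2.7GHz lane: 216000 */

	printf("18bpp needs %d, have %d\n", link_required(119000, 18), avail); /* 214200 <= 216000 */
	printf("24bpp needs %d, have %d\n", link_required(119000, 24), avail); /* 285600 >  216000 */
	return 0;
}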
 
static int
200,25 → 231,29
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;
 
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
 
if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
return MODE_PANEL;
}
 
/* only refuse the mode on non eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!is_edp(intel_dp) &&
(intel_dp_link_required(connector->dev, intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(intel_dp,
mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
}
 
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
279,6 → 314,38
}
}
 
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}
 
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}
 
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!is_edp(intel_dp))
return;
if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
WARN(1, "eDP powered off while attempting aux channel communication.\n");
DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
}
}
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
295,6 → 362,7
uint32_t aux_clock_divider;
int try, precharge;
 
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
302,9 → 370,9
* Note that PCH attached eDP panels should use a 125MHz input
* clock divider.
*/
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
if (is_cpu_edp(intel_dp)) {
if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
408,6 → 476,7
int msg_bytes;
uint8_t ack;
 
intel_dp_check_edp(intel_dp);
if (send_bytes > 16)
return -1;
msg[0] = AUX_NATIVE_WRITE << 4;
450,6 → 519,7
uint8_t ack;
int ret;
 
intel_dp_check_edp(intel_dp);
msg[0] = AUX_NATIVE_READ << 4;
msg[1] = address >> 8;
msg[2] = address & 0xff;
493,6 → 563,7
int reply_bytes;
int ret;
 
intel_dp_check_edp(intel_dp);
/* Set up the command byte */
if (mode & MODE_I2C_READ)
msg[0] = AUX_I2C_READ << 4;
573,10 → 644,15
return -EREMOTEIO;
}
 
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
{
int ret;
 
DRM_DEBUG_KMS("i2c_init %s\n", name);
intel_dp->algo.running = false;
intel_dp->algo.address = 0;
590,7 → 666,10
intel_dp->adapter.algo_data = &intel_dp->algo;
intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
 
return i2c_dp_aux_add_bus(&intel_dp->adapter);
ironlake_edp_panel_vdd_on(intel_dp);
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
 
static bool
598,15 → 677,15
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) {
intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
613,7 → 692,7
* the mode->clock is used to calculate the Data&Link M/N
* of the pipe. For the eDP the fixed clock should be used.
*/
mode->clock = dev_priv->panel_fixed_mode->clock;
mode->clock = intel_dp->panel_fixed_mode->clock;
}
 
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
620,7 → 699,7
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock)
if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
634,19 → 713,6
}
}
 
if (is_edp(intel_dp)) {
/* okay we failed just pick the highest */
intel_dp->lane_count = max_lane_count;
intel_dp->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
 
return true;
}
 
return false;
}
 
706,12 → 772,11
continue;
 
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
break;
}
}
 
740,28 → 805,54
}
}
 
static void ironlake_edp_pll_on(struct drm_encoder *encoder);
static void ironlake_edp_pll_off(struct drm_encoder *encoder);
 
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= intel_dp->color_range;
/* Turn on the eDP PLL if needed */
if (is_edp(intel_dp)) {
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
ironlake_edp_pll_off(encoder);
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
/*
* There are four kinds of DP registers:
*
* IBX PCH
* SNB CPU
* IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
* except that the CPU DP PLL is configured in this
* register
*
* CPT PCH is quite different, having many bits moved
* to the TRANS_DP_CTL register instead. That
* configuration happens (oddly) in ironlake_pch_enable
*/
 
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
else
intel_dp->DP |= DP_LINK_TRAIN_OFF;
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
/* Handle DP bits in common between all three register formats */
 
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DP_PORT_WIDTH_1;
773,14 → 864,16
intel_dp->DP |= DP_PORT_WIDTH_4;
break;
}
if (intel_dp->has_audio)
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 
intel_write_eld(encoder, adjusted_mode);
}
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
 
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
787,14 → 880,44
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
 
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
/* Split out the IBX/CPU vs CPT settings */
 
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
intel_dp->DP |= intel_crtc->pipe << 29;
 
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
 
if (intel_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
 
if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) {
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
802,8 → 925,71
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
}
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
}
 
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
 
#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
 
#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
 
static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
u32 mask,
u32 value)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
mask, value,
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
 
if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(PCH_PP_STATUS),
I915_READ(PCH_PP_CONTROL));
}
}
 
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power on\n");
ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
 
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power off time\n");
ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
 
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
DRM_DEBUG_KMS("Wait for panel power cycle\n");
ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
 
 
/* Read the current pp_control value, unlocking the register if it
* is locked
*/
 
static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
u32 control = I915_READ(PCH_PP_CONTROL);
 
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
return control;
}
 
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
810,98 → 996,155
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
/*
* If the panel wasn't on, make sure there's not a currently
* active PP sequence before enabling AUX VDD.
*/
if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
msleep(dev_priv->panel_t3);
if (!is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP VDD on\n");
 
pp = I915_READ(PCH_PP_CONTROL);
WARN(intel_dp->want_panel_vdd,
"eDP VDD already requested on\n");
 
intel_dp->want_panel_vdd = true;
 
if (ironlake_edp_have_panel_vdd(intel_dp)) {
DRM_DEBUG_KMS("eDP VDD already on\n");
return;
}
 
if (!ironlake_edp_have_panel_power(intel_dp))
ironlake_wait_panel_power_cycle(intel_dp);
 
pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
 
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!ironlake_edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
msleep(intel_dp->panel_power_up_delay);
}
}
 
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
pp = I915_READ(PCH_PP_CONTROL);
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
/* Make sure sequencer is idle before allowing subsequent activity */
msleep(dev_priv->panel_t12);
DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
 
msleep(intel_dp->panel_power_down_delay);
}
}
 
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
 
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
 
intel_dp->want_panel_vdd = false;
 
if (sync) {
ironlake_panel_vdd_off_sync(intel_dp);
} else {
/*
* Queue the timer to fire a long
* time from now (relative to the power down delay)
* to keep the panel power up across a sequence of operations
*/
// schedule_delayed_work(&intel_dp->panel_vdd_work,
// msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
}
}
 
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
u32 pp;
 
if (I915_READ(PCH_PP_STATUS) & PP_ON)
return true;
if (!is_edp(intel_dp))
return;
 
pp = I915_READ(PCH_PP_CONTROL);
DRM_DEBUG_KMS("Turn eDP power on\n");
 
if (ironlake_edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
return;
}
 
ironlake_wait_panel_power_cycle(intel_dp);
 
pp = ironlake_get_pp_control(dev_priv);
if (IS_GEN5(dev)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
 
pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON;
pp |= POWER_TARGET_ON;
if (!IS_GEN5(dev))
pp |= PANEL_POWER_RESET;
 
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
ironlake_wait_panel_on(intel_dp);
 
if (IS_GEN5(dev)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
return false;
}
}
 
static void ironlake_edp_panel_off (struct drm_device *dev)
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
u32 pp;
 
pp = I915_READ(PCH_PP_CONTROL);
if (!is_edp(intel_dp))
return;
 
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
DRM_DEBUG_KMS("Turn eDP power off\n");
 
pp &= ~POWER_TARGET_ON;
WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
 
pp = ironlake_get_pp_control(dev_priv);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
DRM_ERROR("panel off wait timed out: 0x%08x\n",
I915_READ(PCH_PP_STATUS));
 
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
ironlake_wait_panel_off(intel_dp);
}
 
static void ironlake_edp_backlight_on (struct drm_device *dev)
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
/*
* If we enable the backlight right away following a panel power
909,21 → 1152,28
* link. So delay a bit to make sure the image is solid before
* allowing it to appear.
*/
msleep(300);
pp = I915_READ(PCH_PP_CONTROL);
msleep(intel_dp->backlight_on_delay);
pp = ironlake_get_pp_control(dev_priv);
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
}
 
static void ironlake_edp_backlight_off (struct drm_device *dev)
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
pp = I915_READ(PCH_PP_CONTROL);
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
msleep(intel_dp->backlight_off_delay);
}
 
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
986,43 → 1236,39
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
 
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
 
/* Wake up the sink first */
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
ironlake_edp_pll_off(encoder);
/* Make sure the panel is off before trying to
* change the mode
*/
}
intel_dp_link_down(intel_dp);
}
 
static void intel_dp_commit(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
 
if (is_edp(intel_dp))
ironlake_edp_panel_vdd_on(intel_dp);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
 
if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
}
 
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
 
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
 
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
 
static void
1034,29 → 1280,31
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
if (mode != DRM_MODE_DPMS_ON) {
if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_on(encoder);
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
}
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
} else
ironlake_edp_panel_vdd_off(intel_dp, false);
ironlake_edp_backlight_on(intel_dp);
}
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
intel_dp->dpms_mode = mode;
}
 
1090,11 → 1338,11
* link status information
*/
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp)
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
return intel_dp_aux_native_read_retry(intel_dp,
DP_LANE0_1_STATUS,
intel_dp->link_status,
link_status,
DP_LINK_STATUS_SIZE);
}
 
1106,27 → 1354,25
}
 
static uint8_t
intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = adjust_request[lane>>1];
 
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
 
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = adjust_request[lane>>1];
 
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
1148,16 → 1394,41
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
 
static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_1200;
else
return DP_TRAIN_VOLTAGE_SWING_800;
}
 
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp->base.base.dev;
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
1165,17 → 1436,21
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
}
 
static void
intel_get_adjust_train(struct intel_dp *intel_dp)
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
 
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
 
if (this_v > v)
v = this_v;
1183,11 → 1458,13
p = this_p;
}
 
if (v >= I830_DP_VOLTAGE_MAX)
v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
1194,7 → 1471,7
}
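The bit manipulation above hides a simple policy: take the largest voltage swing and pre-emphasis any lane asked for, clamp both to the source's limits, and set the MAX_*_REACHED flags so the training loop knows the sink cannot be given more. The sketch below shows that policy with made-up request values and fixed caps (in the real code the pre-emphasis cap depends on the chosen voltage via intel_dp_pre_emphasis_max()).

/* Policy sketch for intel_get_adjust_train(); values and bit layout are
 * illustrative stand-ins, not the DPCD encoding. */
#include <stdio.h>

#define MAX_SWING_REACHED        0x04
#define MAX_PRE_EMPHASIS_REACHED 0x20

int main(void)
{
	int req_v[4] = { 1, 2, 1, 0 };  /* per-lane voltage levels requested by the sink */
	int req_p[4] = { 0, 1, 2, 0 };  /* per-lane pre-emphasis levels requested */
	int v_max = 2;                  /* source cap, e.g. 800mV */
	int p_max = 2;                  /* source cap at that swing, e.g. 6dB */
	int v = 0, p = 0, train = 0;
	int lane;

	for (lane = 0; lane < 4; lane++) {   /* every lane gets the worst case */
		if (req_v[lane] > v) v = req_v[lane];
		if (req_p[lane] > p) p = req_p[lane];
	}

	if (v >= v_max) { v = v_max; train |= MAX_SWING_REACHED; }
	if (p >= p_max) { p = p_max; train |= MAX_PRE_EMPHASIS_REACHED; }
	train |= v | (p << 3);               /* rough stand-in for the field packing */

	printf("train_set byte for all lanes: 0x%02x (v=%d p=%d)\n", train, v, p);
	return 0;
}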
 
static uint32_t
intel_dp_signal_levels(uint8_t train_set, int lane_count)
intel_dp_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
 
1259,13 → 1536,43
}
}
 
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_600MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
 
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return EDP_LINK_TRAIN_500MV_0DB_IVB;
}
}
 
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
uint8_t l = intel_dp_link_status(link_status, i);
uint8_t l = link_status[lane>>1];
 
return (l >> s) & 0xf;
}
1290,18 → 1597,18
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp)
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
 
lane_align = intel_dp_link_status(intel_dp->link_status,
lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
lane_status = intel_get_lane_status(intel_dp->link_status, lane);
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
1326,8 → 1633,9
 
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
intel_dp->train_set, 4);
if (ret != 4)
intel_dp->train_set,
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
 
return true;
1343,7 → 1651,7
int i;
uint8_t voltage;
bool clock_recovery = false;
int tries;
int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
 
1364,26 → 1672,35
DP_LINK_CONFIGURATION_SIZE);
 
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
tries = 0;
voltage_tries = 0;
loop_tries = 0;
clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
if (IS_GEN6(dev) && is_edp(intel_dp)) {
 
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
 
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
1395,10 → 1712,13
/* Set training pattern 1 */
 
udelay(100);
if (!intel_dp_get_link_status(intel_dp))
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
}
1407,20 → 1727,30
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count)
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
break;
}
memset(intel_dp->train_set, 0, 4);
voltage_tries = 0;
continue;
}
 
/* Check to see if we've tried the same voltage 5 times */
if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++tries;
if (tries == 5)
++voltage_tries;
if (voltage_tries == 5) {
DRM_DEBUG_KMS("too many voltage retries, give up\n");
break;
}
} else
tries = 0;
voltage_tries = 0;
voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
intel_get_adjust_train(intel_dp, link_status);
}
 
intel_dp->DP = DP;
1443,6 → 1773,7
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
if (cr_tries > 5) {
DRM_ERROR("failed to train DP, aborting\n");
1450,15 → 1781,18
break;
}
 
if (IS_GEN6(dev) && is_edp(intel_dp)) {
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
 
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
1470,17 → 1804,17
break;
 
udelay(400);
if (!intel_dp_get_link_status(intel_dp))
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
 
/* Make sure clock is still ok */
if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
 
if (intel_channel_eq_ok(intel_dp)) {
if (intel_channel_eq_ok(intel_dp, link_status)) {
channel_eq = true;
break;
}
1495,11 → 1829,11
}
 
/* Compute new intel_dp->train_set as requested by target */
intel_get_adjust_train(intel_dp);
intel_get_adjust_train(intel_dp, link_status);
++tries;
}
 
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
1529,7 → 1863,7
udelay(100);
}
 
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
1540,8 → 1874,12
 
msleep(17);
 
if (is_edp(intel_dp))
if (is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
}
 
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1576,8 → 1914,10
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
 
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
}
 
static bool
1592,6 → 1932,27
return false;
}
 
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
int ret;
 
ret = intel_dp_aux_native_read_retry(intel_dp,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1);
if (!ret)
return false;
 
return true;
}
 
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
}
 
/*
* According to DP spec
* 5.1.2:
1604,6 → 1965,9
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
 
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
 
1611,7 → 1975,7
return;
 
/* Try to read receiver status if the link appears to be up */
if (!intel_dp_get_link_status(intel_dp)) {
if (!intel_dp_get_link_status(intel_dp, link_status)) {
intel_dp_link_down(intel_dp);
return;
}
1622,7 → 1986,21
return;
}
 
if (!intel_channel_eq_ok(intel_dp)) {
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
/* Clear interrupt source */
intel_dp_aux_native_write_1(intel_dp,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector);
 
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
 
if (!intel_channel_eq_ok(intel_dp, link_status)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
intel_dp_start_link_train(intel_dp);
1683,6 → 2061,31
return intel_dp_detect_dpcd(intel_dp);
}
 
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return edid;
}
 
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
 
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_ddc_get_modes(connector, adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
 
 
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
1736,28 → 2139,36
/* We should parse the EDID data and find out if it has an audio sink
*/
 
ret = intel_ddc_get_modes(connector, &intel_dp->adapter);
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
if (ret) {
if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) {
if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
dev_priv->panel_fixed_mode =
if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, newmode);
break;
}
}
}
 
return ret;
}
 
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (is_edp(intel_dp)) {
if (dev_priv->panel_fixed_mode != NULL) {
/* initialize panel mode from VBT if available for eDP */
if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (intel_dp->panel_fixed_mode) {
intel_dp->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
}
}
if (intel_dp->panel_fixed_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
drm_mode_probed_add(connector, mode);
return 1;
}
1844,6 → 2255,10
 
// i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
kfree(intel_dp);
}
 
1896,7 → 2311,8
continue;
 
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
 
1980,10 → 2396,13
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
if (is_edp(intel_dp))
if (is_edp(intel_dp)) {
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
// INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
// ironlake_panel_vdd_work);
}
 
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
 
2019,25 → 2438,59
break;
}
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
 
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
u32 pp_on, pp_div;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
 
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
 
/* Get T3 & T12 values (note: VESA not bspec terminology) */
dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
dev_priv->panel_t3 /= 10; /* t3 in 100us units */
dev_priv->panel_t12 = pp_div & 0xf;
dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
 
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
 
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
 
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
 
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
 
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
vbt = dev_priv->edp.pps;
 
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
 
#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
 
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
intel_dp->panel_power_down_delay = get_delay(t10);
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
intel_dp->panel_power_cycle_delay);
 
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
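The get_delay() macro above merges the register value with the VBT value and converts from 100 us units to whole milliseconds, rounding up. A minimal standalone sketch of that arithmetic, with hypothetical example values (not driver code):

#include <stdio.h>

/* Sketch of the get_delay() conversion: inputs are in 100 us units, the
 * larger of the register and VBT values wins, and "+ 9) / 10" rounds up
 * to milliseconds. The example values below are hypothetical. */
static unsigned int delay_ms(unsigned int cur_100us, unsigned int vbt_100us)
{
	unsigned int v = cur_100us > vbt_100us ? cur_100us : vbt_100us;
	return (v + 9) / 10;
}

int main(void)
{
	/* register reports 21.0 ms (210), VBT reports 25.3 ms (253) -> 26 ms */
	printf("%u ms\n", delay_ms(210, 253));
	return 0;
}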
 
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (ret) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
dev_priv->no_aux_handshake =
2052,18 → 2505,11
}
}
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
 
intel_encoder->hot_plug = intel_dp_hot_plug;
 
if (is_edp(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (dev_priv->panel_fixed_mode) {
dev_priv->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
}
}
dev_priv->int_edp_connector = connector;
intel_panel_setup_backlight(dev);
}
/drivers/video/drm/i915/intel_drv.h
26,6 → 26,7
#define __INTEL_DRV_H__
 
#include <linux/i2c.h>
#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
111,6 → 112,7
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
172,12 → 174,37
int16_t cursor_width, cursor_height;
bool cursor_visible;
unsigned int bpp;
 
bool no_pll; /* tertiary pipe for IVB */
bool use_pll_a;
};
 
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool primary_disabled;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
};
 
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
 
#define DIP_HEADER_SIZE 5
 
185,7 → 212,7
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
 
#define DIP_TYPE_SPD 0x3
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
#define DIP_LEN_SPD 25
#define DIP_SPD_UNKNOWN 0
287,6 → 314,7
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
 
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
338,9 → 366,6
struct drm_connector *connector,
struct intel_load_detect_pipe *old);
 
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector);
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable);
extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
360,7 → 385,7
 
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
380,5 → 405,25
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
 
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
 
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
 
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void sandybridge_update_wm(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
 
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_fb.c
93,7 → 93,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
105,11 → 105,12
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
 
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
mode_cmd.depth = sizes->surface_depth;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
 
size = mode_cmd.pitch * mode_cmd.height;
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
186,7 → 187,7
 
// memset(info->screen_base, 0, size);
 
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
206,6 → 207,7
out:
return ret;
}
 
static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
/drivers/video/drm/i915/intel_hdmi.c
69,8 → 69,7
frame->checksum = 0;
frame->ecc = 0;
 
/* Header isn't part of the checksum */
for (i = 5; i < frame->len; i++)
for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
sum += data[i];
 
frame->checksum = 0x100 - sum;
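The loop above now covers the DIP header bytes as well as the payload, so the checksum makes the whole packet sum to zero modulo 256. A minimal standalone sketch of that rule (not the driver routine):

#include <stdint.h>
#include <stddef.h>

/* Sketch: an infoframe checksum is chosen so that header, payload and the
 * checksum byte itself sum to 0 modulo 256. `bytes` is header + payload
 * with the checksum position already zeroed. */
static uint8_t infoframe_checksum(const uint8_t *bytes, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += bytes[i];

	return (uint8_t)(0x100 - sum);
}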
104,7 → 103,7
flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
break;
case DIP_TYPE_SPD:
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
break;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
165,9 → 164,9
 
flags = intel_infoframe_index(frame);
 
val &= ~VIDEO_DIP_SELECT_MASK;
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 
I915_WRITE(reg, val | flags);
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
 
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
245,16 → 244,17
sdvox |= HDMI_MODE_SELECT;
 
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
sdvox |= SDVO_AUDIO_ENABLE;
sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
intel_write_eld(encoder, adjusted_mode);
}
 
if (intel_crtc->pipe == 1) {
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_B_SEL_CPT;
else
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
}
 
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
269,7 → 269,11
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
 
if (intel_hdmi->has_audio)
enable_bits |= SDVO_AUDIO_ENABLE;
 
temp = I915_READ(intel_hdmi->sdvox_reg);
 
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
281,9 → 285,9
}
 
if (mode != DRM_MODE_DPMS_ON) {
temp &= ~SDVO_ENABLE;
temp &= ~enable_bits;
} else {
temp |= SDVO_ENABLE;
temp |= enable_bits;
}
 
I915_WRITE(intel_hdmi->sdvox_reg, temp);
486,6 → 490,7
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
int i;
 
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
511,7 → 516,7
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
538,10 → 543,14
 
intel_hdmi->sdvox_reg = sdvox_reg;
 
if (!HAS_PCH_SPLIT(dev))
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = i9xx_write_infoframe;
else
I915_WRITE(VIDEO_DIP_CTL, 0);
} else {
intel_hdmi->write_infoframe = ironlake_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
}
 
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
/drivers/video/drm/i915/intel_i2c.c
550,13 → 550,7
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
 
/* speed:
* 0x0 = 100 KHz
* 0x1 = 50 KHz
* 0x2 = 400 KHz
* 0x3 = 1000 Khz
*/
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
}
 
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
/drivers/video/drm/i915/intel_lvds.c
711,6 → 711,14
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Clientron E830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
717,6 → 725,14
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus AT5NM10T-I",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
},
},
 
{ } /* terminating entry */
};
890,9 → 906,11
intel_encoder->type = INTEL_OUTPUT_LVDS;
 
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else
intel_encoder->crtc_mask = (1 << 1);
if (INTEL_INFO(dev)->gen >= 5)
intel_encoder->crtc_mask |= (1 << 0);
 
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
/drivers/video/drm/i915/intel_modes.c
26,6 → 26,7
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drv.h"
74,6 → 75,7
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
/drivers/video/drm/i915/intel_panel.c
193,13 → 193,10
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
if (IS_PINEVIEW(dev)) {
if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
} else {
else
max >>= 16;
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
 
if (is_backlight_combination_mode(dev))
max *= 0xff;
218,13 → 215,12
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
 
if (is_backlight_combination_mode(dev)){
u8 lbpc;
 
val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
241,7 → 237,7
I915_WRITE(BLC_PWM_CPU_CTL, val | level);
}
 
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
261,26 → 257,29
}
 
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
} else
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
 
void intel_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->backlight_level = level;
if (dev_priv->backlight_enabled)
intel_panel_actually_set_backlight(dev, level);
}
 
void intel_panel_disable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->backlight_enabled) {
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = false;
intel_panel_actually_set_backlight(dev, 0);
}
 
intel_panel_set_backlight(dev, 0);
}
 
void intel_panel_enable_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
288,8 → 287,8
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
intel_panel_set_backlight(dev, dev_priv->backlight_level);
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
 
static void intel_panel_init_backlight(struct drm_device *dev)
336,7 → 335,8
static int intel_panel_get_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
return intel_panel_get_backlight(dev);
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->backlight_level;
}
 
static const struct backlight_ops intel_panel_bl_ops = {
/drivers/video/drm/i915/intel_ringbuffer.c
36,6 → 36,16
//#include "i915_trace.h"
#include "intel_drv.h"
 
/*
* 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing.
*/
struct pipe_control {
struct drm_i915_gem_object *obj;
volatile u32 *cpu_page;
u32 gtt_offset;
};
 
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
125,6 → 135,118
return 0;
}
 
/**
* Emits a PIPE_CONTROL with a non-zero post-sync operation, for
* implementing two workarounds on gen6. From section 1.4.7.1
* "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
*
* [DevSNB-C+{W/A}] Before any depth stall flush (including those
* produced by non-pipelined state commands), software needs to first
* send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
* 0.
*
* [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
* =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
*
* And the workaround for these two requires this workaround first:
*
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
* BEFORE the pipe-control with a post-sync op and no write-cache
* flushes.
*
* And this last workaround is tricky because of the requirements on
* that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
* volume 2 part 1:
*
* "1 of the following must also be set:
* - Render Target Cache Flush Enable ([12] of DW1)
* - Depth Cache Flush Enable ([0] of DW1)
* - Stall at Pixel Scoreboard ([1] of DW1)
* - Depth Stall ([13] of DW1)
* - Post-Sync Operation ([13] of DW1)
* - Notify Enable ([8] of DW1)"
*
* The cache flushes require the workaround flush that triggered this
* one, so we can't use it. Depth stall would trigger the same.
* Post-sync nonzero is what triggered this second workaround, so we
* can't use that one either. Notify enable is IRQs, which aren't
* really our business. That leaves only stall at scoreboard.
*/
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
 
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
intel_ring_emit(ring, 0); /* low dword */
intel_ring_emit(ring, 0); /* high dword */
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
}
 
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
 
/* Force SNB workarounds for PIPE_CONTROL flushes */
intel_emit_post_sync_nonzero_flush(ring);
 
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0); /* lower dword */
intel_ring_emit(ring, 0); /* upper dword */
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
}
 
static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
205,16 → 327,6
return 0;
}
 
/*
* 965+ support PIPE_CONTROL commands, which provide finer grained control
* over cache flushing.
*/
struct pipe_control {
struct drm_i915_gem_object *obj;
volatile u32 *cpu_page;
u32 gtt_offset;
};
 
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
295,13 → 407,17
GFX_MODE_ENABLE(GFX_REPLAY_MODE));
}
 
if (INTEL_INFO(dev)->gen >= 6) {
} else if (IS_GEN5(dev)) {
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
if (ret)
return ret;
}
 
if (INTEL_INFO(dev)->gen >= 6) {
I915_WRITE(INSTPM,
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
}
 
return ret;
}
 
314,35 → 430,33
}
 
static void
update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
update_mboxes(struct intel_ring_buffer *ring,
u32 seqno,
u32 mmio_offset)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int id;
 
/*
* cs -> 1 = vcs, 0 = bcs
* vcs -> 1 = bcs, 0 = cs,
* bcs -> 1 = cs, 0 = vcs.
*/
id = ring - dev_priv->ring;
id += 2 - i;
id %= 3;
 
intel_ring_emit(ring,
MI_SEMAPHORE_MBOX |
intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_REGISTER |
MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring,
RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
intel_ring_emit(ring, mmio_offset);
}
 
/**
* gen6_add_request - Update the semaphore mailbox registers
*
* @ring - ring that is adding a request
* @seqno - return seqno stuck into the ring
*
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
static int
gen6_add_request(struct intel_ring_buffer *ring,
u32 *result)
u32 *seqno)
{
u32 seqno;
u32 mbox1_reg;
u32 mbox2_reg;
int ret;
 
ret = intel_ring_begin(ring, 10);
349,48 → 463,98
if (ret)
return ret;
 
seqno = i915_gem_get_seqno(ring->dev);
update_semaphore(ring, 0, seqno);
update_semaphore(ring, 1, seqno);
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
 
*seqno = i915_gem_get_seqno(ring->dev);
 
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, *seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
 
*result = seqno;
return 0;
}
 
int
intel_ring_sync(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
* @waiter - ring that is waiting
* @signaller - ring which has, or will signal
* @seqno - seqno which the waiter will block on
*/
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
int ring,
u32 seqno)
{
int ret;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
 
ret = intel_ring_begin(ring, 4);
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
 
intel_ring_emit(ring,
MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_REGISTER |
intel_ring_sync_index(ring, to) << 17 |
MI_SEMAPHORE_COMPARE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
intel_ring_advance(waiter);
 
return 0;
}
 
/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
// WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
RCS,
seqno);
}
 
/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
// WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
VCS,
seqno);
}
 
/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
// WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
BCS,
seqno);
}
 
 
 
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL | 2); \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL); \
intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
intel_ring_emit(ring__, 0); \
intel_ring_emit(ring__, 0); \
418,8 → 582,9
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, 0);
434,8 → 599,9
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
469,6 → 635,19
}
 
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
 
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
if (IS_GEN7(dev))
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
513,7 → 692,6
POSTING_READ(IMR);
}
 
#if 0
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
{
553,7 → 731,6
}
spin_unlock(&ring->irq_lock);
}
#endif
 
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
626,8 → 803,6
return 0;
}
 
#if 0
 
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
637,6 → 812,12
if (!dev->irq_enabled)
return false;
 
/* It looks like we need to prevent the gt from suspending while waiting
* for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
if (IS_GEN7(dev))
gen6_gt_force_wake_get(dev_priv);
 
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
ring->irq_mask &= ~rflag;
661,6 → 842,9
ironlake_disable_irq(dev_priv, gflag);
}
spin_unlock(&ring->irq_lock);
 
if (IS_GEN7(dev))
gen6_gt_force_wake_put(dev_priv);
}
 
static bool
698,7 → 882,6
}
spin_unlock(&ring->irq_lock);
}
#endif
 
static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
828,7 → 1011,7
INIT_LIST_HEAD(&ring->gpu_write_list);
 
// init_waitqueue_head(&ring->irq_queue);
// spin_lock_init(&ring->irq_lock);
spin_lock_init(&ring->irq_lock);
ring->irq_mask = ~0;
 
if (I915_NEED_GFX_HWS(dev)) {
1038,11 → 1221,16
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
// .get_seqno = ring_get_seqno,
// .irq_get = render_ring_get_irq,
// .irq_put = render_ring_put_irq,
.get_seqno = ring_get_seqno,
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
// .cleanup = render_ring_cleanup,
.sync_to = render_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
MI_SEMAPHORE_SYNC_RV,
MI_SEMAPHORE_SYNC_RB},
.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};
 
/* ring buffer for bit-stream decoder */
1056,9 → 1244,9
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
// .get_seqno = ring_get_seqno,
// .irq_get = bsd_ring_get_irq,
// .irq_put = bsd_ring_put_irq,
.get_seqno = ring_get_seqno,
.irq_get = bsd_ring_get_irq,
.irq_put = bsd_ring_put_irq,
.dispatch_execbuffer = ring_dispatch_execbuffer,
};
 
1124,8 → 1312,6
return 0;
}
 
#if 0
 
static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
1158,8 → 1344,6
GEN6_BSD_USER_INTERRUPT);
}
 
#endif
 
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
1170,13 → 1354,17
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
// .get_seqno = ring_get_seqno,
// .irq_get = gen6_bsd_ring_get_irq,
// .irq_put = gen6_bsd_ring_put_irq,
.get_seqno = gen6_ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.sync_to = gen6_bsd_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
MI_SEMAPHORE_SYNC_INVALID,
MI_SEMAPHORE_SYNC_VB},
.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};
 
#if 0
/* Blitter support (SandyBridge+) */
 
static bool
1194,7 → 1382,6
GT_BLT_USER_INTERRUPT,
GEN6_BLITTER_USER_INTERRUPT);
}
#endif
 
 
/* Workaround for some stepping of SNB,
1302,11 → 1489,16
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
// .get_seqno = ring_get_seqno,
// .irq_get = blt_ring_get_irq,
// .irq_put = blt_ring_put_irq,
.get_seqno = gen6_ring_get_seqno,
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
// .cleanup = blt_ring_cleanup,
.sync_to = gen6_blt_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
MI_SEMAPHORE_SYNC_BV,
MI_SEMAPHORE_SYNC_INVALID},
.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};
 
int intel_init_render_ring_buffer(struct drm_device *dev)
1317,11 → 1509,13
*ring = render_ring;
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
// ring->irq_get = gen6_render_ring_get_irq;
// ring->irq_put = gen6_render_ring_put_irq;
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
ring->get_seqno = gen6_ring_get_seqno;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
// ring->get_seqno = pc_render_get_seqno;
ring->get_seqno = pc_render_get_seqno;
}
 
if (!I915_NEED_GFX_HWS(dev)) {
/drivers/video/drm/i915/intel_ringbuffer.h
75,7 → 75,12
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
u32 seqno);
 
u32 semaphore_register[3]; /* our mbox written by others */
u32 signal_mbox[2]; /* mboxes this ring signals to */
/**
* List of objects currently involved in rendering from the
* ringbuffer.
180,9 → 185,6
void intel_ring_advance(struct intel_ring_buffer *ring);
 
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_sync(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
u32 seqno);
 
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
/drivers/video/drm/i915/intel_sdvo.c
58,6 → 58,7
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
static const char *tv_format_names[] = {
101,6 → 102,11
*/
uint16_t attached_output;
 
/*
* Hotplug activation bits for this device
*/
uint8_t hotplug_active[2];
 
/**
* This is used to select the color range of RBG outputs in HDMI mode.
* It is only valid when using TMDS encoding and 8 bit per color mode.
1068,15 → 1074,13
 
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = 0;
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
1089,8 → 1093,12
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
 
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
else
sdvox |= TRANSCODER(intel_crtc->pipe);
 
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
 
1217,81 → 1225,26
return true;
}
 
/* No use! */
#if 0
struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
struct drm_connector *connector = NULL;
struct intel_sdvo *iout = NULL;
struct intel_sdvo *sdvo;
 
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
iout = to_intel_sdvo(connector);
 
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
 
sdvo = iout->dev_priv;
 
if (sdvo->sdvo_reg == SDVOB && sdvoB)
return connector;
 
if (sdvo->sdvo_reg == SDVOC && !sdvoB)
return connector;
 
}
 
return NULL;
}
 
int intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
struct intel_sdvo *intel_sdvo;
DRM_DEBUG_KMS("\n");
 
if (!connector)
return 0;
 
intel_sdvo = to_intel_sdvo(connector);
 
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
 
void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
u8 response[2];
u8 status;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector);
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_sdvo, &response, 2);
 
if (on) {
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
status = intel_sdvo_read_response(intel_sdvo, &response, 2);
 
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
} else {
response[0] = 0;
response[1] = 0;
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
}
 
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
intel_sdvo_read_response(intel_sdvo, &response, 2);
}
#endif
 
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
/* Is there more than one type of output? */
int caps = intel_sdvo->caps.output_flags & 0xf;
return caps & -caps;
return hweight16(intel_sdvo->caps.output_flags) > 1;
}
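hweight16() is a population count over the 16 output flags, so the rewritten test is true only when more than one output type is advertised; the old expression caps & -caps just isolates the lowest set bit and is non-zero whenever any output is set at all. A standalone sketch of the difference (not driver code):

#include <stdio.h>

/* Sketch: popcount over the output flags (what hweight16() provides) versus
 * the old "caps & -caps" test, which only isolates the lowest set bit. */
static int popcount16(unsigned int v)
{
	int n = 0;

	for (v &= 0xffff; v; v &= v - 1)	/* clear lowest set bit */
		n++;
	return n;
}

int main(void)
{
	unsigned int flags = 0x1;		/* a single output advertised */
	unsigned int caps = flags & 0xf;

	printf("new check: %d\n", popcount16(flags) > 1);	/* 0 */
	printf("old check: %d\n", (caps & -caps) != 0);		/* 1 */
	return 0;
}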
 
static struct edid *
1312,7 → 1265,7
}
 
enum drm_connector_status
intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
enum drm_connector_status status;
1372,6 → 1325,18
return status;
}
 
static bool
intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
struct edid *edid)
{
bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
bool connector_is_digital = !!IS_DIGITAL(sdvo);
 
DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
connector_is_digital, monitor_is_digital);
return connector_is_digital == monitor_is_digital;
}
 
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
1407,7 → 1372,7
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
else if (IS_TMDS(intel_sdvo_connector))
ret = intel_sdvo_hdmi_sink_detect(connector);
ret = intel_sdvo_tmds_sink_detect(connector);
else {
struct edid *edid;
 
1416,10 → 1381,12
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
edid))
ret = connector_status_connected;
else
ret = connector_status_disconnected;
else
ret = connector_status_connected;
 
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
1460,11 → 1427,8
edid = intel_sdvo_get_analog_edid(connector);
 
if (edid != NULL) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
 
if (connector_is_digital == monitor_is_digital) {
if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
1944,7 → 1908,7
struct intel_sdvo *sdvo, u32 reg)
{
struct sdvo_device_mapping *mapping;
u8 pin, speed;
u8 pin;
 
if (IS_SDVOB(reg))
mapping = &dev_priv->sdvo_mappings[0];
1952,19 → 1916,17
mapping = &dev_priv->sdvo_mappings[1];
 
pin = GMBUS_PORT_DPB;
speed = GMBUS_RATE_1MHZ >> 8;
if (mapping->initialized) {
if (mapping->initialized)
pin = mapping->i2c_pin;
speed = mapping->i2c_speed;
}
 
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
intel_gmbus_set_speed(sdvo->i2c, speed);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else
} else {
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
}
}
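The assignment above is a read-modify-write of the two rate-select bits (bits 9:8, matching the removed 0x0-0x3 speed table); the GMBUS_RATE_* constants callers now pass are already shifted into that field, which is why the << 8 is gone. A minimal standalone sketch of the field update (the mask name below exists only in the sketch):

#include <stdint.h>

/* Sketch of the reg0 update: clear the 2-bit rate-select field at bits 9:8,
 * then OR in a pre-shifted rate value. RATE_FIELD_MASK is local to this
 * sketch, not a driver define. */
#define RATE_FIELD_MASK	(0x3u << 8)

static uint32_t set_rate_field(uint32_t reg0, uint32_t rate_bits)
{
	return (reg0 & ~RATE_FIELD_MASK) | (rate_bits & RATE_FIELD_MASK);
}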
 
static bool
intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
2044,6 → 2006,7
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
struct intel_sdvo_connector *intel_sdvo_connector;
 
2061,6 → 2024,16
 
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_sdvo->hotplug_active[0] |= 1 << device;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
}
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
2243,7 → 2216,7
bytes[0], bytes[1]);
return false;
}
intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
return true;
}
2568,6 → 2541,14
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
 
/* Set up hotplug command - note paranoia about contents of reply.
* We assume that the hardware is in a sane state, and only touch
* the bits we think we understand.
*/
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
intel_sdvo->hotplug_active[0] &= ~0x3;
 
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
/drivers/video/drm/i915/intel_sprite.c
0,0 → 1,664
/*
* Copyright © 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Jesse Barnes <jbarnes@virtuousgeek.org>
*
* New plane/sprite handling.
*
* The older chips had a separate interface for programming plane related
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fourcc.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
 
static void
ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
int pixel_size;
 
sprctl = I915_READ(SPRCTL(pipe));
 
/* Mask out pixel format bits in case we change it */
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
}
 
if (obj->tiling_mode != I915_TILING_NONE)
sprctl |= SPRITE_TILED;
 
/* must disable */
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
sprctl |= SPRITE_DEST_KEY;
 
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
dev_priv->sprite_scaling_enabled = true;
sandybridge_update_wm(dev);
intel_wait_for_vblank(dev, pipe);
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
sandybridge_update_wm(dev);
}
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
 
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(SPRLINOFF(pipe), offset);
}
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
 
static void
ivb_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_WRITE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
}
 
static int
ivb_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 sprctl;
int ret = 0;
 
intel_plane = to_intel_plane(plane);
 
I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
 
sprctl = I915_READ(SPRCTL(intel_plane->pipe));
sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
 
POSTING_READ(SPRKEYMSK(intel_plane->pipe));
 
return ret;
}
 
static void
ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 sprctl;
 
intel_plane = to_intel_plane(plane);
 
key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
key->flags = 0;
 
sprctl = I915_READ(SPRCTL(intel_plane->pipe));
 
if (sprctl & SPRITE_DEST_KEY)
key->flags = I915_SET_COLORKEY_DESTINATION;
else if (sprctl & SPRITE_SOURCE_KEY)
key->flags = I915_SET_COLORKEY_SOURCE;
else
key->flags = I915_SET_COLORKEY_NONE;
}
 
static void
snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
u32 dvscntr, dvsscale = 0;
 
dvscntr = I915_READ(DVSCNTR(pipe));
 
/* Mask out pixel format bits in case we change it */
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_RGBX;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
}
 
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
 
/* must disable */
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
dvscntr |= DVS_ENABLE;
 
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
 
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(DVSLINOFF(pipe), offset);
}
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
 
static void
snb_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
 
I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
 
static void
intel_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
}
 
static void
intel_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
}
 
static int
snb_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 dvscntr;
int ret = 0;
 
intel_plane = to_intel_plane(plane);
 
I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
 
dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
 
POSTING_READ(DVSKEYMSK(intel_plane->pipe));
 
return ret;
}
 
static void
snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane;
u32 dvscntr;
 
intel_plane = to_intel_plane(plane);
 
key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
key->flags = 0;
 
dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
 
if (dvscntr & DVS_DEST_KEY)
key->flags = I915_SET_COLORKEY_DESTINATION;
else if (dvscntr & DVS_SOURCE_KEY)
key->flags = I915_SET_COLORKEY_SOURCE;
else
key->flags = I915_SET_COLORKEY_NONE;
}
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
bool disable_primary = false;
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
old_obj = intel_plane->obj;
 
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
return -EINVAL;
 
if (crtc_x >= primary_w || crtc_y >= primary_h)
return -EINVAL;
 
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
 
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
* The caller must handle that by adjusting source offset and size.
*/
if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
crtc_w += crtc_x;
crtc_x = 0;
}
if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
goto out;
if ((crtc_x + crtc_w) > primary_w)
crtc_w = primary_w - crtc_x;
 
if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
crtc_h += crtc_y;
crtc_y = 0;
}
if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
goto out;
if (crtc_y + crtc_h > primary_h)
crtc_h = primary_h - crtc_y;
 
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
 
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
return -EINVAL;
 
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
if ((crtc_x == 0) && (crtc_y == 0) &&
(crtc_w == primary_w) && (crtc_h == primary_h))
disable_primary = true;
 
mutex_lock(&dev->struct_mutex);
 
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret)
goto out_unlock;
 
intel_plane->obj = obj;
 
/*
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
if (!disable_primary && intel_plane->primary_disabled) {
intel_enable_primary(crtc);
intel_plane->primary_disabled = false;
}
 
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
 
if (disable_primary) {
intel_disable_primary(crtc);
intel_plane->primary_disabled = true;
}
 
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
if (old_obj != obj) {
mutex_unlock(&dev->struct_mutex);
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
// i915_gem_object_unpin(old_obj);
}
 
out_unlock:
mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
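 
/*
 * Minimal sketch (not part of the driver): the destination clamping above in
 * isolation. The source offset adjustment is left to the caller, as the
 * comment in intel_update_plane() states. Example: with primary_w = 1366 a
 * request of crtc_x = -100, crtc_w = 300 becomes crtc_x = 0, crtc_w = 200.
 */
static inline int clamp_span(int *pos, unsigned int *len, int primary)
{
	if (*pos < 0 && *pos + (int)*len > 0) {
		*len += *pos;                  /* drop the part left of / above the screen */
		*pos = 0;
	}
	if (*pos + (int)*len <= 0 || *pos >= primary)
		return 1;                      /* entirely off screen: nothing to display */
	if (*pos + (int)*len > primary)
		*len = primary - *pos;         /* drop the part right of / below the screen */
	return 0;
}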
 
static int
intel_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
 
if (intel_plane->primary_disabled) {
intel_enable_primary(plane->crtc);
intel_plane->primary_disabled = false;
}
 
intel_plane->disable_plane(plane);
 
if (!intel_plane->obj)
goto out;
 
mutex_lock(&dev->struct_mutex);
// i915_gem_object_unpin(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
 
return ret;
}
 
static void intel_destroy_plane(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
intel_disable_plane(plane);
drm_plane_cleanup(plane);
kfree(intel_plane);
}
 
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
 
if (!dev_priv)
return -EINVAL;
 
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
goto out_unlock;
}
 
plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
 
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
 
if (!dev_priv)
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -EINVAL;
goto out_unlock;
}
 
plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
 
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
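 
/*
 * Hedged usage sketch, not part of this port: on a stock Linux DRM stack these
 * handlers sit behind the sprite colorkey ioctls, so a client would enable
 * source keying roughly as below. drmIoctl() from libdrm and
 * DRM_IOCTL_I915_SET_SPRITE_COLORKEY from the upstream i915_drm.h are assumed.
 */
static int demo_set_source_key(int fd, uint32_t plane_id)
{
	struct drm_intel_sprite_colorkey ckey = {
		.plane_id     = plane_id,      /* DRM plane object id */
		.min_value    = 0x00000000,
		.max_value    = 0x00000000,
		.channel_mask = 0x00ffffff,    /* key on the R, G and B channels */
		.flags        = I915_SET_COLORKEY_SOURCE,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ckey);
}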
 
static const struct drm_plane_funcs intel_plane_funcs = {
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
.destroy = intel_destroy_plane,
};
 
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
 
int
intel_plane_init(struct drm_device *dev, enum pipe pipe)
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
int ret;
 
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
 
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
if (!intel_plane)
return -ENOMEM;
 
if (IS_GEN6(dev)) {
intel_plane->max_downscale = 16;
intel_plane->update_plane = snb_update_plane;
intel_plane->disable_plane = snb_disable_plane;
intel_plane->update_colorkey = snb_update_colorkey;
intel_plane->get_colorkey = snb_get_colorkey;
} else if (IS_GEN7(dev)) {
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
}
 
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs, snb_plane_formats,
ARRAY_SIZE(snb_plane_formats), false);
if (ret)
kfree(intel_plane);
 
return ret;
}
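 
/*
 * Hedged usage sketch, assuming an upstream-style modeset init (the exact
 * call site is not part of this diff): one sprite plane is registered per
 * pipe, using the num_pipe value set up in i915_dma.c.
 */
static void demo_register_sprites(struct drm_device *dev,
				  struct drm_i915_private *dev_priv)
{
	int pipe, ret;

	for (pipe = 0; pipe < dev_priv->num_pipe; pipe++) {
		ret = intel_plane_init(dev, pipe);
		if (ret)
			DRM_DEBUG_KMS("sprite plane init failed for pipe %d: %d\n",
				      pipe, ret);
	}
}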
 
/drivers/video/drm/i915/kms_display.c
191,48 → 191,31
};
safe_sti(ifl);
 
{
#define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4))
#define BLT_WRITE_ALPHA (1<<21)
#define BLT_WRITE_RGB (1<<20)
 
#if 1
{
 
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
 
u32_t br13, cmd, *b;
 
int n=0;
 
cmd = XY_COLOR_BLT | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
br13 = os_display->pitch;
br13 |= 0xF0 << 16;
br13 |= 3 << 24;
 
obj = i915_gem_alloc_object(dev, 4096);
i915_gem_object_pin(obj, 4096, true);
 
cmd_buffer = MapIoMem(obj->pages[0], 4096, PG_SW|PG_NOCACHE);
cmd_offset = obj->gtt_offset;
};
#endif
 
b = (u32_t*)cmd_buffer;
b[n++] = cmd;
b[n++] = br13;
b[n++] = 0; // top, left
b[n++] = (128 << 16) | 128; // bottom, right
b[n++] = 0; // dst
b[n++] = 0x0000FF00;
b[n++] = MI_BATCH_BUFFER_END;
if( n & 1)
b[n++] = MI_NOOP;
int err;
 
// cmd_buffer = (u32_t)&b[n];
// i915_gem_object_set_to_gtt_domain(obj, false);
 
 
ring = &dev_priv->ring[BCS];
ring->dispatch_execbuffer(ring,cmd_offset, n*4);
 
err = init_bitmaps();
if( !err )
{
printf("Initialize bitmap manager\n");
};
 
LEAVE();
303,7 → 286,11
 
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitch = ALIGN(reqmode->width * 4, 64);
fb->pitches[0] = ALIGN(reqmode->width * 4, 64);
fb->pitches[1] = ALIGN(reqmode->width * 4, 64);
fb->pitches[2] = ALIGN(reqmode->width * 4, 64);
fb->pitches[3] = ALIGN(reqmode->width * 4, 64);
 
fb->bits_per_pixel = 32;
fb->depth == 24;
 
320,13 → 307,13
{
os_display->width = fb->width;
os_display->height = fb->height;
os_display->pitch = fb->pitch;
os_display->pitch = fb->pitches[0];
os_display->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitch);
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
 
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitch);
fb->width, fb->height, fb->pitches[0]);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
406,6 → 393,8
 
void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
{
/* FIXME synchronization */
 
list_del(&cursor->list);
// radeon_bo_unpin(cursor->robj);
// KernelFree(cursor->data);
592,113 → 581,227
 
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
 
int video_blit(uint64_t src_offset, int x, int y,
int w, int h, int pitch)
 
typedef struct
{
int left;
int top;
int right;
int bottom;
}rect_t;
 
 
#include "clip.inc"
 
void FASTCALL GetWindowRect(rect_t *rc)__asm__("GetWindowRect");
 
#define CURRENT_TASK (0x80003000)
 
static u32_t get_display_map()
{
u32_t addr;
 
addr = (u32_t)os_display;
addr+= sizeof(display_t); /* shoot me */
return *(u32_t*)addr;
}
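 
/*
 * Assumption: blit_clip() is provided by clip.inc, which is not part of this
 * diff. Judging from the call site in blit_video() below it clips the source
 * and destination rectangles against each other, adjusting the offsets and
 * size in place, and returns nonzero when nothing is left to draw. A rough,
 * hypothetical equivalent (clip_t field names taken from the usage below):
 */
static int blit_clip_sketch(clip_t *dst_clip, int *dst_x, int *dst_y,
                            clip_t *src_clip, int *src_x, int *src_y,
                            u32_t *w, u32_t *h)
{
	int width  = (int)*w;
	int height = (int)*h;
	int d;

	/* pull the blit back inside the destination clip, moving the source with it */
	if (*dst_x < dst_clip->xmin) { d = dst_clip->xmin - *dst_x; *src_x += d; width  -= d; *dst_x = dst_clip->xmin; }
	if (*dst_y < dst_clip->ymin) { d = dst_clip->ymin - *dst_y; *src_y += d; height -= d; *dst_y = dst_clip->ymin; }

	/* and the same against the source clip, moving the destination with it */
	if (*src_x < src_clip->xmin) { d = src_clip->xmin - *src_x; *dst_x += d; width  -= d; *src_x = src_clip->xmin; }
	if (*src_y < src_clip->ymin) { d = src_clip->ymin - *src_y; *dst_y += d; height -= d; *src_y = src_clip->ymin; }

	/* shrink to whatever both rectangles can still hold */
	if (width  > dst_clip->xmax - *dst_x + 1) width  = dst_clip->xmax - *dst_x + 1;
	if (height > dst_clip->ymax - *dst_y + 1) height = dst_clip->ymax - *dst_y + 1;
	if (width  > src_clip->xmax - *src_x + 1) width  = src_clip->xmax - *src_x + 1;
	if (height > src_clip->ymax - *src_y + 1) height = src_clip->ymax - *src_y + 1;

	if (width <= 0 || height <= 0)
		return 1;                      /* fully clipped away */

	*w = (u32_t)width;
	*h = (u32_t)height;
	return 0;
}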
 
#define XY_SRC_COPY_CHROMA_CMD ((2<<29)|(0x73<<22)|8)
#define ROP_COPY_SRC 0xCC
#define FORMAT8888 3
 
typedef int v4si __attribute__ ((vector_size (16)));
 
 
int blit_video(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h)
{
drm_i915_private_t *dev_priv = main_device->dev_private;
struct intel_ring_buffer *ring;
 
u32_t br13, cmd, *b;
bitmap_t *bitmap;
rect_t winrc;
clip_t dst_clip;
clip_t src_clip;
u32_t width;
u32_t height;
 
u32_t br13, cmd, slot_mask, *b;
u32_t offset;
 
u8 slot;
int n=0;
 
// if( cmd_buffer & 0xF80 )
// cmd_buffer&= 0xFFFFF000;
if(unlikely(hbitmap==0))
return -1;
 
// b = (u32_t*)ALIGN(cmd_buffer,16);
bitmap = (bitmap_t*)hman_get_data(&bm_man, hbitmap);
 
// offset = cmd_offset + ((u32_t)b & 0xFFF);
if(unlikely(bitmap==NULL))
return -1;
 
b = cmd_buffer;
 
cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGB;
br13 = os_display->pitch;
br13 |= 0xCC << 16;
br13 |= 3 << 24;
GetWindowRect(&winrc);
 
b[n++] = cmd;
b[n++] = br13;
b[n++] = (y << 16) | x;
b[n++] = ( (y+h) << 16) | (x+w); // bottom, right
b[n++] = 0; // dst_offset
b[n++] = 0; //src_top|src_left
dst_clip.xmin = 0;
dst_clip.ymin = 0;
dst_clip.xmax = winrc.right-winrc.left-1;
dst_clip.ymax = winrc.bottom -winrc.top -1;
 
b[n++] = pitch;
b[n++] = (u32_t)src_offset;
src_clip.xmin = 0;
src_clip.ymin = 0;
src_clip.xmax = bitmap->width - 1;
src_clip.ymax = bitmap->height - 1;
 
b[n++] = MI_BATCH_BUFFER_END;
if( n & 1)
b[n++] = MI_NOOP;
width = w;
height = h;
 
// i915_gem_object_set_to_gtt_domain(obj, false);
if( blit_clip(&dst_clip, &dst_x, &dst_y,
&src_clip, &src_x, &src_y,
&width, &height) )
return 0;
 
ring = &dev_priv->ring[BCS];
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
dst_x+= winrc.left;
dst_y+= winrc.top;
 
intel_ring_begin(ring, 4);
// if (ret)
// return ret;
slot = *((u8*)CURRENT_TASK);
 
// cmd = MI_FLUSH_DW;
// if (invalidate & I915_GEM_GPU_DOMAINS)
// cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
intel_ring_emit(ring, MI_FLUSH_DW);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
slot_mask = (u32_t)slot<<24;
 
{
#if 0
static v4si write_mask = {0xFF000000, 0xFF000000,
0xFF000000, 0xFF000000};
 
fail:
return -1;
u8* src_offset;
u8* dst_offset;
 
src_offset = (u8*)(src_y*bitmap->pitch + src_x*4);
src_offset += (u32)bitmap->uaddr;
 
dst_offset = (u8*)(dst_y*os_display->width + dst_x);
dst_offset+= get_display_map();
 
u32_t tmp_h = height;
 
/* xmm7 <- alpha-byte store mask, xmm6 <- window slot id replicated into the alpha byte of every pixel lane */
__asm__ __volatile__ (
"movdqa %[write_mask], %%xmm7 \n"
"movd %[slot_mask], %%xmm6 \n"
"punpckldq %%xmm6, %%xmm6 \n"
"punpcklqdq %%xmm6, %%xmm6 \n"
:: [write_mask] "m" (write_mask),
[slot_mask] "g" (slot_mask)
:"xmm7", "xmm6");
 
while( tmp_h--)
{
u32_t tmp_w = width;
 
u8* tmp_src = src_offset;
u8* tmp_dst = dst_offset;
 
src_offset+= bitmap->pitch;
dst_offset+= os_display->width;
 
while( tmp_w >= 8 )
{
__asm__ __volatile__ (
"movq (%0), %%xmm0 \n"
"punpcklbw %%xmm0, %%xmm0 \n"
"movdqa %%xmm0, %%xmm1 \n"
"punpcklwd %%xmm0, %%xmm0 \n"
"punpckhwd %%xmm1, %%xmm1 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm1 \n"
"maskmovdqu %%xmm7, %%xmm0 \n"
"addl $16, %%edi \n"
"maskmovdqu %%xmm7, %%xmm1 \n"
:: "r" (tmp_dst), "D" (tmp_src)
:"xmm0", "xmm1");
__asm__ __volatile__ ("":::"edi");
tmp_w -= 8;
tmp_src += 32;
tmp_dst += 8;
};
 
if( tmp_w >= 4 )
{
__asm__ __volatile__ (
"movd (%0), %%xmm0 \n"
"punpcklbw %%xmm0, %%xmm0 \n"
"punpcklwd %%xmm0, %%xmm0 \n"
"pcmpeqb %%xmm6, %%xmm0 \n"
"maskmovdqu %%xmm7, %%xmm0 \n"
:: "r" (tmp_dst), "D" (tmp_src)
:"xmm0");
tmp_w -= 4;
tmp_src += 16;
tmp_dst += 4;
};
 
int blit_video(u32 hbitmap, int dst_x, int dst_y,
int src_x, int src_y, u32 w, u32 h)
while( tmp_w--)
{
drm_i915_private_t *dev_priv = main_device->dev_private;
struct intel_ring_buffer *ring;
*(tmp_src+3) = (*tmp_dst==slot)?0xFF:0x00;
tmp_src+=4;
tmp_dst++;
};
};
#else
u8* src_offset;
u8* dst_offset;
 
bitmap_t *bitmap;
u32_t br13, cmd, *b;
u32_t offset;
src_offset = (u8*)(src_y*bitmap->pitch + src_x*4);
src_offset += (u32)bitmap->uaddr;
 
int n=0;
dst_offset = (u8*)(dst_y*os_display->width + dst_x);
dst_offset+= get_display_map();
 
if(unlikely(hbitmap==0))
return -1;
u32_t tmp_h = height;
 
bitmap = hman_get_data(&bm_man, hbitmap);
while( tmp_h--)
{
u32_t tmp_w = width;
 
if(unlikely(bitmap==NULL))
return -1;
u8* tmp_src = src_offset;
u8* tmp_dst = dst_offset;
 
// if( cmd_buffer & 0xF80 )
// cmd_buffer&= 0xFFFFF000;
src_offset+= bitmap->pitch;
dst_offset+= os_display->width;
 
// b = (u32_t*)ALIGN(cmd_buffer,16);
while( tmp_w--)
{
*(tmp_src+3) = (*tmp_dst==slot)?0xFF:0x00;
tmp_src+=4;
tmp_dst++;
};
};
}
#endif
 
// offset = cmd_offset + ((u32_t)b & 0xFFF);
if((cmd_buffer & 0xFC0)==0xFC0)
cmd_buffer&= 0xFFFFF000;
 
b = cmd_buffer;
b = (u32_t*)ALIGN(cmd_buffer,16);
 
cmd = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGB;
offset = cmd_offset + ((u32_t)b & 0xFFF);
 
cmd = XY_SRC_COPY_CHROMA_CMD | BLT_WRITE_RGB | BLT_WRITE_ALPHA;
cmd |= 3 << 17;
 
br13 = os_display->pitch;
br13 |= 0xCC << 16;
br13 |= 3 << 24;
br13|= ROP_COPY_SRC << 16;
br13|= FORMAT8888 << 24;
 
b[n++] = cmd;
b[n++] = br13;
b[n++] = (dst_y << 16) | dst_x;
b[n++] = ( (dst_y+h) << 16) | (dst_x+w); // bottom, right
b[n++] = 0; // dst_offset
b[n++] = (src_y << 16) | src_x;
b[n++] = (dst_y << 16) | dst_x; // left, top
b[n++] = ((dst_y+height-1)<< 16)|(dst_x+width-1); // bottom, right
b[n++] = 0; // destination
b[n++] = (src_y << 16) | src_x; // source left & top
b[n++] = bitmap->pitch; // source pitch
b[n++] = bitmap->gaddr; // source
 
b[n++] = bitmap->pitch;
b[n++] = bitmap->gaddr;
b[n++] = 0; // Transparency Color Low
b[n++] = 0x00FFFFFF; // Transparency Color High
 
b[n++] = MI_BATCH_BUFFER_END;
if( n & 1)
706,7 → 809,11
 
// i915_gem_object_set_to_gtt_domain(obj, false);
 
if (HAS_BLT(main_device))
ring = &dev_priv->ring[BCS];
else
ring = &dev_priv->ring[RCS];
 
ring->dispatch_execbuffer(ring, cmd_offset, n*4);
 
intel_ring_begin(ring, 4);
/drivers/video/drm/i915/main.c
12,7 → 12,7
#include <linux/pci.h>
#include <syscall.h>
 
typedef struct bitmap bitmap_t;
#include "bitmap.h"
 
void parse_cmdline(char *cmdline, char *log);
int _stdcall display_handler(ioctl_t *io);
19,7 → 19,6
int init_agp(void);
 
int create_video(int width, int height, u32_t *outp);
int create_bitmap(bitmap_t **pbitmap, int width, int height);
int video_blit(uint64_t src_offset, int x, int y,
int w, int h, int pitch);
 
52,7 → 51,7
return 0;
};
}
dbgprintf("i915_early_preview second edition\n cmdline: %s\n", cmdline);
dbgprintf("i915 blitter preview\n cmdline: %s\n", cmdline);
 
enum_pci_devices();
 
78,10 → 77,10
#define SRV_ENUM_MODES 1
#define SRV_SET_MODE 2
 
#define SRV_CREATE_VIDEO 9
#define SRV_BLIT_VIDEO 10
#define SRV_CREATE_BITMAP 11
#define SRV_CREATE_BITMAP 10
 
#define SRV_BLIT_VIDEO 20
 
#define check_input(size) \
if( unlikely((inp==NULL)||(io->inp_size != (size))) ) \
break;
124,25 → 123,18
retval = set_user_mode((videomode_t*)inp);
break;
 
case SRV_CREATE_VIDEO:
check_input(2);
check_output(4);
retval = create_video(inp[0], inp[1], outp);
case SRV_CREATE_BITMAP:
check_input(5);
retval = create_bitmap((struct ubitmap*)inp);
break;
 
 
case SRV_BLIT_VIDEO:
video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
inp[4], inp[5], inp[6]);
blit_video( inp[0], inp[1], inp[2],
inp[3], inp[4], inp[5], inp[6]);
 
retval = 0;
break;
 
case SRV_CREATE_BITMAP:
check_input(8);
check_output(4);
retval = create_bitmap((bitmap_t**)outp, inp[0], inp[1]);
break;
 
};
 
return retval;
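 
/*
 * Hypothetical user-mode sketch of driving the service handler above.
 * Assumptions: KolibriOS driver-control call 68.17, the ioctl_t layout from
 * the driver SDK, the field names of struct ubitmap from bitmap.h, and a
 * driver handle obtained beforehand with the 68.16 load-driver call.
 */
typedef struct
{
	unsigned  handle;     /* driver handle from 68.16                  */
	unsigned  io_code;    /* SRV_CREATE_BITMAP, SRV_BLIT_VIDEO, ...    */
	void     *input;
	int       inp_size;
	void     *output;
	int       out_size;
} ioctl_t;

static int call_driver(ioctl_t *io)
{
	int retval;
	__asm__ __volatile__("int $0x40"
	                     : "=a"(retval)
	                     : "a"(68), "b"(17), "c"(io));
	return retval;
}

static int demo_create_bitmap(unsigned hDriver)
{
	struct ubitmap bm = { .width = 512, .height = 256 };

	ioctl_t io = {
		.handle   = hDriver,
		.io_code  = SRV_CREATE_BITMAP,
		.input    = &bm,
		.inp_size = 5,        /* matches check_input(5) in the handler */
	};

	if (call_driver(&io) != 0)
		return -1;

	/* bm.handle, bm.pitch and bm.data have been filled in by create_bitmap() */
	return (int)bm.handle;
}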