Subversion Repositories Kolibri OS

Compare Revisions

Rev 5353 → Rev 5354

/drivers/video/drm/i915/Gtt/agp.h
29,16 → 29,8
#ifndef _AGP_BACKEND_PRIV_H
#define _AGP_BACKEND_PRIV_H 1
 
//#include <asm/agp.h> /* for flush_agp_cache() */
#include <asm/agp.h> /* for flush_agp_cache() */
 
 
enum chipset_type {
NOT_SUPPORTED,
SUPPORTED,
};
 
struct agp_memory;
 
#define PFX "agpgart: "
 
//#define AGP_DEBUG 1
/drivers/video/drm/i915/Gtt/intel-agp.c
2,19 → 2,21
* Intel AGPGART routines.
*/
 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>
 
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
 
#include "agp.h"
#include "intel-agp.h"
 
 
#include <syscall.h>
 
#define __devinit
/drivers/video/drm/i915/Gtt/intel-gtt.c
18,15 → 18,14
#include <syscall.h>
 
#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#
#include <linux/export.h>
#include <linux/scatterlist.h>
 
//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
/drivers/video/drm/i915/Makefile
1,16 → 1,20
 
 
CC = gcc.exe
CC = gcc
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/asm \
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
 
CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -msse2 -fomit-frame-pointer -fno-ident -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
62,6 → 66,7
i915_gpu_error.c \
i915_irq.c \
i915_params.c \
intel_audio.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
74,16 → 79,22
intel_dsi_pll.c \
intel_dvo.c \
intel_fbdev.c \
intel_fifo_underrun.c \
intel_frontbuffer.c \
intel_hdmi.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
intel_modes.c \
intel_panel.c \
intel_pm.c \
intel_psr.c \
intel_renderstate_gen6.c \
intel_renderstate_gen7.c \
intel_renderstate_gen8.c \
intel_renderstate_gen9.c \
intel_ringbuffer.c \
intel_runtime_pm.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
104,6 → 115,7
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
/drivers/video/drm/i915/Makefile.lto
2,14 → 2,18
CC = gcc
FASM = e:/fasm/fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"
 
 
DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..
 
INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
INCLUDES = -I$(DRV_INCLUDES) \
-I$(DRV_INCLUDES)/asm \
-I$(DRV_INCLUDES)/uapi \
-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)
 
CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
60,6 → 64,7
i915_gpu_error.c \
i915_irq.c \
i915_params.c \
intel_audio.c \
intel_bios.c \
intel_crt.c \
intel_ddi.c \
72,16 → 77,22
intel_dsi_pll.c \
intel_dvo.c \
intel_fbdev.c \
intel_fifo_underrun.c \
intel_frontbuffer.c \
intel_hdmi.c \
intel_i2c.c \
intel_lrc.c \
intel_lvds.c \
intel_modes.c \
intel_panel.c \
intel_pm.c \
intel_psr.c \
intel_renderstate_gen6.c \
intel_renderstate_gen7.c \
intel_renderstate_gen8.c \
intel_renderstate_gen9.c \
intel_ringbuffer.c \
intel_runtime_pm.c \
intel_sdvo.c \
intel_sideband.c \
intel_sprite.c \
102,6 → 113,7
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
../drm_dp_mst_topology.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
/drivers/video/drm/i915/dvo_ns2501.c
60,16 → 60,297
 
#define NS2501_REGC 0x0c
 
enum {
MODE_640x480,
MODE_800x600,
MODE_1024x768,
};
 
struct ns2501_reg {
uint8_t offset;
uint8_t value;
};
 
/*
* Magic values based on what the BIOS on
* Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
*/
static const struct ns2501_reg regs_1024x768[][86] = {
[MODE_640x480] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x05, },
},
[MODE_800x600] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x19, },
[5] = { .offset = 0x1c, .value = 0x64, },
[6] = { .offset = 0x1d, .value = 0x02, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0xd7, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0xf8, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x1a, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x73, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0x27, },
[20] = { .offset = 0x81, .value = 0x03, },
[21] = { .offset = 0x82, .value = 0x41, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x06, },
[33] = { .offset = 0x9c, .value = 0x23, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x30, },
[43] = { .offset = 0xb9, .value = 0xc8, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x20, },
[47] = { .offset = 0x11, .value = 0xc8, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x04, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x83, },
[67] = { .offset = 0xaa, .value = 0x40, },
[68] = { .offset = 0xab, .value = 0x32, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x80, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x07, },
},
[MODE_1024x768] = {
[0] = { .offset = 0x0a, .value = 0x81, },
[1] = { .offset = 0x18, .value = 0x07, },
[2] = { .offset = 0x19, .value = 0x00, },
[3] = { .offset = 0x1a, .value = 0x00, },
[4] = { .offset = 0x1b, .value = 0x11, },
[5] = { .offset = 0x1c, .value = 0x54, },
[6] = { .offset = 0x1d, .value = 0x03, },
[7] = { .offset = 0x1e, .value = 0x02, },
[8] = { .offset = 0xf3, .value = 0x90, },
[9] = { .offset = 0xf9, .value = 0x00, },
[10] = { .offset = 0xc1, .value = 0x90, },
[11] = { .offset = 0xc2, .value = 0x00, },
[12] = { .offset = 0xc3, .value = 0x0f, },
[13] = { .offset = 0xc4, .value = 0x03, },
[14] = { .offset = 0xc5, .value = 0x16, },
[15] = { .offset = 0xc6, .value = 0x00, },
[16] = { .offset = 0xc7, .value = 0x02, },
[17] = { .offset = 0xc8, .value = 0x02, },
[18] = { .offset = 0xf4, .value = 0x00, },
[19] = { .offset = 0x80, .value = 0xff, },
[20] = { .offset = 0x81, .value = 0x07, },
[21] = { .offset = 0x82, .value = 0x3d, },
[22] = { .offset = 0x83, .value = 0x05, },
[23] = { .offset = 0x94, .value = 0x00, },
[24] = { .offset = 0x95, .value = 0x00, },
[25] = { .offset = 0x96, .value = 0x05, },
[26] = { .offset = 0x97, .value = 0x00, },
[27] = { .offset = 0x9a, .value = 0x88, },
[28] = { .offset = 0x9b, .value = 0x00, },
[29] = { .offset = 0x98, .value = 0x00, },
[30] = { .offset = 0x99, .value = 0x00, },
[31] = { .offset = 0xf7, .value = 0x88, },
[32] = { .offset = 0xf8, .value = 0x0a, },
[33] = { .offset = 0x9c, .value = 0x24, },
[34] = { .offset = 0x9d, .value = 0x00, },
[35] = { .offset = 0x9e, .value = 0x25, },
[36] = { .offset = 0x9f, .value = 0x03, },
[37] = { .offset = 0xa0, .value = 0x28, },
[38] = { .offset = 0xa1, .value = 0x01, },
[39] = { .offset = 0xa2, .value = 0x28, },
[40] = { .offset = 0xa3, .value = 0x05, },
[41] = { .offset = 0xb6, .value = 0x09, },
[42] = { .offset = 0xb8, .value = 0x00, },
[43] = { .offset = 0xb9, .value = 0xa0, },
[44] = { .offset = 0xba, .value = 0x00, },
[45] = { .offset = 0xbb, .value = 0x20, },
[46] = { .offset = 0x10, .value = 0x00, },
[47] = { .offset = 0x11, .value = 0xa0, },
[48] = { .offset = 0x12, .value = 0x02, },
[49] = { .offset = 0x20, .value = 0x00, },
[50] = { .offset = 0x22, .value = 0x00, },
[51] = { .offset = 0x23, .value = 0x00, },
[52] = { .offset = 0x24, .value = 0x00, },
[53] = { .offset = 0x25, .value = 0x00, },
[54] = { .offset = 0x8c, .value = 0x10, },
[55] = { .offset = 0x8d, .value = 0x02, },
[56] = { .offset = 0x8e, .value = 0x10, },
[57] = { .offset = 0x8f, .value = 0x00, },
[58] = { .offset = 0x90, .value = 0xff, },
[59] = { .offset = 0x91, .value = 0x07, },
[60] = { .offset = 0x92, .value = 0xa0, },
[61] = { .offset = 0x93, .value = 0x02, },
[62] = { .offset = 0xa5, .value = 0x00, },
[63] = { .offset = 0xa6, .value = 0x00, },
[64] = { .offset = 0xa7, .value = 0x00, },
[65] = { .offset = 0xa8, .value = 0x00, },
[66] = { .offset = 0xa9, .value = 0x04, },
[67] = { .offset = 0xaa, .value = 0x70, },
[68] = { .offset = 0xab, .value = 0x4f, },
[69] = { .offset = 0xac, .value = 0x00, },
[70] = { .offset = 0xa4, .value = 0x84, },
[71] = { .offset = 0x7e, .value = 0x18, },
[72] = { .offset = 0x84, .value = 0x00, },
[73] = { .offset = 0x85, .value = 0x00, },
[74] = { .offset = 0x86, .value = 0x00, },
[75] = { .offset = 0x87, .value = 0x00, },
[76] = { .offset = 0x88, .value = 0x00, },
[77] = { .offset = 0x89, .value = 0x00, },
[78] = { .offset = 0x8a, .value = 0x00, },
[79] = { .offset = 0x8b, .value = 0x00, },
[80] = { .offset = 0x26, .value = 0x00, },
[81] = { .offset = 0x27, .value = 0x00, },
[82] = { .offset = 0xad, .value = 0x00, },
[83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
[84] = { .offset = 0x41, .value = 0x00, },
[85] = { .offset = 0xc0, .value = 0x01, },
},
};
 
static const struct ns2501_reg regs_init[] = {
[0] = { .offset = 0x35, .value = 0xff, },
[1] = { .offset = 0x34, .value = 0x00, },
[2] = { .offset = 0x08, .value = 0x30, },
};
 
struct ns2501_priv {
//I2CDevRec d;
bool quiet;
int reg_8_shadow;
int reg_8_set;
// Shadow registers for i915
int dvoc;
int pll_a;
int srcdim;
int fw_blc;
const struct ns2501_reg *regs;
};
 
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
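 
The per-mode tables above replace the long hand-written register write sequences that the old ns2501_mode_set() carried (still visible as removed lines further down in this diff). A minimal sketch of how such a table is consumed, reusing the ns2501_writeb() helper already defined in dvo_ns2501.c; the loop bound of 84 mirrors the one the new mode_set code uses, since the last two entries (registers 0x41 and 0xc0) are programmed separately by the dpms path:
 
/* Hedged sketch, not part of the revision itself: replay one mode's
 * offset/value pairs through the DVO chip's register interface. */
static void ns2501_apply_mode_regs(struct intel_dvo_device *dvo,
                                   const struct ns2501_reg *regs)
{
        int i;
 
        for (i = 0; i < 84; i++)        /* entries 84/85 are handled by dpms */
                ns2501_writeb(dvo, regs[i].offset, regs[i].value);
}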
205,11 → 486,9
goto out;
}
ns->quiet = false;
ns->reg_8_set = 0;
ns->reg_8_shadow =
NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
 
DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
 
return true;
 
out:
242,9 → 521,9
* of the panel in here so we could always accept it
* by disabling the scaler.
*/
if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
(mode->hdisplay == 640 && mode->vdisplay == 480) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768)) {
if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
(mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
return MODE_OK;
} else {
return MODE_ONE_SIZE; /* Is this a reasonable error? */
255,181 → 534,31
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ok;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
int mode_idx, i;
 
DRM_DEBUG_KMS
("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
/*
* Where do I find the native resolution for which scaling is not required???
*
* First trigger the DVO on as otherwise the chip does not appear on the i2c
* bus.
*/
do {
ok = true;
if (mode->hdisplay == 640 && mode->vdisplay == 480)
mode_idx = MODE_640x480;
else if (mode->hdisplay == 800 && mode->vdisplay == 600)
mode_idx = MODE_800x600;
else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
mode_idx = MODE_1024x768;
else
return;
 
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("switching to 800x600\n");
/* Hopefully doing it every time won't hurt... */
for (i = 0; i < ARRAY_SIZE(regs_init); i++)
ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
 
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
* I'm just copying it here over.
* This also means that I cannot support any other modes
* except the ones supported by the bios.
*/
ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
ok &= ns2501_writeb(dvo, 0x1b, 0x19);
ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
ok &= ns2501_writeb(dvo, 0x1d, 0x02);
ns->regs = regs_1024x768[mode_idx];
 
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
 
ok &= ns2501_writeb(dvo, 0x80, 0x27);
ok &= ns2501_writeb(dvo, 0x81, 0x03);
ok &= ns2501_writeb(dvo, 0x82, 0x41);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
 
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x04);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
 
ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
 
ok &= ns2501_writeb(dvo, 0x96, 0x00);
 
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
 
ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
 
ok &= ns2501_writeb(dvo, 0xa4, 0x80);
 
ok &= ns2501_writeb(dvo, 0xb6, 0x00);
 
ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
 
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
 
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
 
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
 
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x73);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
 
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
DRM_DEBUG_KMS("switching to 640x480\n");
/*
* No, I do not know where this data comes from.
* It is just what the video bios left in the DVO, so
* I'm just copying it here over.
* This also means that I cannot support any other modes
* except the ones supported by the bios.
*/
ns->reg_8_shadow &= ~NS2501_8_BPAS;
 
ok &= ns2501_writeb(dvo, 0x11, 0xa0);
ok &= ns2501_writeb(dvo, 0x1b, 0x11);
ok &= ns2501_writeb(dvo, 0x1c, 0x54);
ok &= ns2501_writeb(dvo, 0x1d, 0x03);
 
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
 
ok &= ns2501_writeb(dvo, 0x80, 0xff);
ok &= ns2501_writeb(dvo, 0x81, 0x07);
ok &= ns2501_writeb(dvo, 0x82, 0x3d);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
 
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x10);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
 
ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
 
ok &= ns2501_writeb(dvo, 0x96, 0x05);
 
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
 
ok &= ns2501_writeb(dvo, 0x9c, 0x24);
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
 
ok &= ns2501_writeb(dvo, 0xa4, 0x84);
 
ok &= ns2501_writeb(dvo, 0xb6, 0x09);
 
ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
 
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0x90);
 
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
 
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x16);
 
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x02);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
 
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
DRM_DEBUG_KMS("switching to 1024x768\n");
/*
* This might or might not work, actually. I'm silently
* assuming here that the native panel resolution is
* 1024x768. If not, then this leaves the scaler disabled
* generating a picture that is likely not the expected.
*
* Problem is that I do not know where to take the panel
* dimensions from.
*
* Enable the bypass, scaling not required.
*
* The scaler registers are irrelevant here....
*
*/
ns->reg_8_shadow |= NS2501_8_BPAS;
ok &= ns2501_writeb(dvo, 0x37, 0x44);
} else {
/*
* Data not known. Bummer!
* Hopefully, the code should not go here
* as mode_OK delivered no other modes.
*/
ns->reg_8_shadow |= NS2501_8_BPAS;
for (i = 0; i < 84; i++)
ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
} while (!ok && retries--);
}
 
/* set the NS2501 power state */
static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
439,61 → 568,47
if (!ns2501_readb(dvo, NS2501_REG8, &ch))
return false;
 
if (ch & NS2501_8_PD)
return true;
else
return false;
return ch & NS2501_8_PD;
}
 
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
int retries = 10;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
 
DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
ch = ns->reg_8_shadow;
if (enable) {
if (WARN_ON(ns->regs[83].offset != 0x08 ||
ns->regs[84].offset != 0x41 ||
ns->regs[85].offset != 0xc0))
return;
 
if (enable)
ch |= NS2501_8_PD;
else
ch &= ~NS2501_8_PD;
ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
 
if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
ns->reg_8_set = 1;
ns->reg_8_shadow = ch;
ns2501_writeb(dvo, 0x41, ns->regs[84].value);
 
do {
ok = true;
ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
ok &=
ns2501_writeb(dvo, 0x34,
enable ? 0x03 : 0x00);
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
} while (!ok && retries--);
}
}
ns2501_writeb(dvo, 0x34, 0x01);
msleep(15);
 
static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
ns2501_writeb(dvo, 0x08, 0x35);
if (!(ns->regs[83].value & NS2501_8_BPAS))
ns2501_writeb(dvo, 0x08, 0x31);
msleep(200);
 
ns2501_readb(dvo, NS2501_FREQ_LO, &val);
DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_FREQ_HI, &val);
DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG8, &val);
DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG9, &val);
DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REGC, &val);
DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
ns2501_writeb(dvo, 0x34, 0x03);
 
ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
} else {
ns2501_writeb(dvo, 0x34, 0x01);
msleep(200);
 
ns2501_writeb(dvo, 0x08, 0x34);
msleep(15);
 
ns2501_writeb(dvo, 0x34, 0x00);
}
}
 
static void ns2501_destroy(struct intel_dvo_device *dvo)
{
512,6 → 627,5
.mode_set = ns2501_mode_set,
.dpms = ns2501_dpms,
.get_hw_state = ns2501_get_hw_state,
.dump_regs = ns2501_dump_regs,
.destroy = ns2501_destroy,
};
/drivers/video/drm/i915/i915_cmd_parser.c
73,7 → 73,7
* those commands required by the parser. This generally works because command
* opcode ranges have standard command length encodings. So for commands that
* the parser does not need to check, it can easily skip them. This is
* implementated via a per-ring length decoding vfunc.
* implemented via a per-ring length decoding vfunc.
*
* Unfortunately, there are a number of commands that do not follow the standard
* length encoding for their opcode range, primarily amongst the MI_* commands.
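 
An illustrative sketch of the per-ring length-decoding vfunc described above (a sketch under assumptions, not this file's code; the client-field position and the mask values vary per ring and per opcode range):
 
/*
 * Sketch only: a length-decode callback returns a mask selecting the
 * length field of a command header, so the parser can size and skip
 * commands it does not need to inspect. Returning 0 means the encoding
 * is unknown and parsing must abort.
 */
#include <linux/types.h>
 
static u32 example_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = cmd_header >> 29;  /* instruction "client" field */
 
        if (client == 0x0)              /* most MI_* commands */
                return 0x3f;
        else if (client == 0x3)         /* render/3D commands */
                return 0xff;
 
        return 0;                       /* unknown client: cannot skip safely */
}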
138,6 → 138,11
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
/*
* MI_BATCH_BUFFER_START requires some special handling. It's not
* really a 'skip' action but it doesn't seem like it's worth adding
* a new action. See i915_parse_cmds().
*/
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
 
408,6 → 413,8
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
OACONTROL, /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
GEN7_3DPRIM_END_OFFSET,
GEN7_3DPRIM_START_VERTEX,
GEN7_3DPRIM_VERTEX_COUNT,
709,6 → 716,7
BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
BUG_ON(!validate_regs_sorted(ring));
 
if (hash_empty(ring->cmd_hash)) {
ret = init_hash_table(ring, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
715,6 → 723,7
fini_hash_table(ring);
return ret;
}
}
 
ring->needs_cmd_parser = true;
 
836,23 → 845,16
* @ring: the ring in question
*
* Only certain platforms require software batch buffer command parsing, and
* only when enabled via module paramter.
* only when enabled via module parameter.
*
* Return: true if the ring requires software command parsing
*/
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
if (!ring->needs_cmd_parser)
return false;
 
/*
* XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
* disabled. That will cause all of the parser's PPGTT checks to
* fail. For now, disable parsing when PPGTT is off.
*/
if (!dev_priv->mm.aliasing_ppgtt)
if (!USES_PPGTT(ring->dev))
return false;
 
return (i915.enable_cmd_parser == 1);
888,8 → 890,10
* OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
*/
if (reg_addr == OACONTROL) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false;
}
 
if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
*oacontrol_set = (cmd[2] != 0);
959,7 → 963,8
* Parses the specified batch buffer looking for privilege violations as
* described in the overview.
*
* Return: non-zero if the parser finds violations or otherwise fails
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
int i915_parse_cmds(struct intel_engine_cs *ring,
struct drm_i915_gem_object *batch_obj,
1006,6 → 1011,16
break;
}
 
/*
* If the batch buffer contains a chained batch, return an
* error that tells the caller to abort and dispatch the
* workload as a non-secure batch.
*/
if (desc->cmd.value == MI_BATCH_BUFFER_START) {
ret = -EACCES;
break;
}
 
if (desc->flags & CMD_DESC_FIXED)
length = desc->length.fixed;
else
1061,6 → 1076,8
*
* 1. Initial version. Checks batches and reports violations, but leaves
* hardware parsing enabled (so does not allow new use cases).
* 2. Allow access to the MI_PREDICATE_SRC0 and
* MI_PREDICATE_SRC1 registers.
*/
return 1;
return 2;
}
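 
For context, a hedged userspace-side sketch (not part of this driver tree) of how the bumped parser version would be observed through the I915_PARAM_CMD_PARSER_VERSION case handled in i915_getparam() later in this diff; libdrm's drmIoctl() and the i915 uapi structures are assumed to be available:
 
/* Sketch: query the command parser version from userspace via libdrm.
 * After this revision the driver reports 2 instead of 1. */
#include <xf86drm.h>
#include <i915_drm.h>
 
static int query_cmd_parser_version(int drm_fd)
{
        int version = 0;
        drm_i915_getparam_t gp = {
                .param = I915_PARAM_CMD_PARSER_VERSION,
                .value = &version,
        };
 
        if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                return -1;      /* ioctl failed; parser version unknown */
 
        return version;
}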
/drivers/video/drm/i915/i915_dma.c
28,15 → 28,17
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
45,890 → 47,6
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
 
#define OUT_RING(x) \
intel_ring_emit(LP_RING(dev_priv), x)
 
#define ADVANCE_LP_RING() \
__intel_ring_advance(LP_RING(dev_priv))
 
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
 
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
if (I915_NEED_GFX_HWS(dev_priv->dev))
return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
else
return intel_read_status_page(LP_RING(dev_priv), reg);
}
 
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21
 
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
 
/*
* The dri breadcrumb update races against the drm master disappearing.
* Instead of trying to fix this (this is by far not the only ums issue)
* just don't do the update in kms mode.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
}
 
static void i915_write_hws_pga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 addr;
 
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
 
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
static void i915_free_hws(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
 
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
 
/* Need to rewrite hardware status page */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
 
#if 0
 
void i915_kernel_lost_context(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_engine_cs *ring = LP_RING(dev_priv);
struct intel_ringbuffer *ringbuf = ring->buffer;
 
/*
* We should never lose context on the ring with modesetting
* as we don't expose it to userspace
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
if (ringbuf->space < 0)
ringbuf->space += ringbuf->size;
 
if (!dev->primary->master)
return;
 
master_priv = dev->primary->master->driver_priv;
if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
 
static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
 
mutex_lock(&dev->struct_mutex);
for (i = 0; i < I915_NUM_RINGS; i++)
intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
 
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
 
return 0;
}
 
static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
 
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
 
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->buffer->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
 
ret = intel_render_ring_init_dri(dev,
init->ring_start,
init->ring_size);
if (ret) {
i915_dma_cleanup(dev);
return ret;
}
}
 
dev_priv->dri1.cpp = init->cpp;
dev_priv->dri1.back_offset = init->back_offset;
dev_priv->dri1.front_offset = init->front_offset;
dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
 
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->dri1.allow_batchbuffer = 1;
 
return 0;
}
 
static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("%s\n", __func__);
 
if (ring->buffer->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
 
/* Program Hardware Status Page */
if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
i915_write_hws_pga(dev);
 
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
return 0;
}
 
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_init_t *init = data;
int retcode = 0;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
break;
case I915_RESUME_DMA:
retcode = i915_dma_resume(dev);
break;
default:
retcode = -EINVAL;
break;
}
 
return retcode;
}
 
/* Implement basically the same security restrictions as hardware does
* for MI_BATCH_NON_SECURE. These can be made stricter at any time.
*
* Most of the calculations below involve calculating the size of a
* particular instruction. It's important to get the size right as
* that tells us where the next instruction to check is. Any illegal
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
switch ((cmd >> 23) & 0x3f) {
case 0x0:
return 1; /* MI_NOOP */
case 0x4:
return 1; /* MI_FLUSH */
default:
return 0; /* disallow everything else */
}
break;
case 0x1:
return 0; /* reserved */
case 0x2:
return (cmd & 0xff) + 2; /* 2d commands */
case 0x3:
if (((cmd >> 24) & 0x1f) <= 0x18)
return 1;
 
switch ((cmd >> 24) & 0x1f) {
case 0x1c:
return 1;
case 0x1d:
switch ((cmd >> 16) & 0xff) {
case 0x3:
return (cmd & 0x1f) + 2;
case 0x4:
return (cmd & 0xf) + 2;
default:
return (cmd & 0xffff) + 2;
}
case 0x1e:
if (cmd & (1 << 23))
return (cmd & 0xffff) + 1;
else
return 1;
case 0x1f:
if ((cmd & (1 << 23)) == 0) /* inline vertices */
return (cmd & 0x1ffff) + 2;
else if (cmd & (1 << 17)) /* indirect random */
if ((cmd & 0xffff) == 0)
return 0; /* unknown length, too hard */
else
return (((cmd & 0xffff) + 1) / 2) + 1;
else
return 2; /* indirect sequential */
default:
return 0;
}
default:
return 0;
}
 
return 0;
}
 
static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
 
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
return -EINVAL;
 
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
 
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
}
 
ret = BEGIN_LP_RING((dwords+1)&~1);
if (ret)
return ret;
 
for (i = 0; i < dwords; i++)
OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
 
ADVANCE_LP_RING();
 
return 0;
}
#endif
 
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
 
if (INTEL_INFO(dev)->gen >= 4) {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
 
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
} else {
ret = BEGIN_LP_RING(6);
if (ret)
return ret;
 
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
}
ADVANCE_LP_RING();
 
return 0;
}
 
#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
* emit. For now, do it in both places:
*/
 
static void i915_emit_breadcrumb(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
 
static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
 
if (cmd->sz & 0x3) {
DRM_ERROR("alignment");
return -EINVAL;
}
 
i915_kernel_lost_context(dev);
 
count = nbox ? nbox : 1;
 
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
 
ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
 
i915_emit_breadcrumb(dev);
return 0;
}
 
static int i915_dispatch_batchbuffer(struct drm_device *dev,
drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i, count, ret;
 
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
return -EINVAL;
}
 
i915_kernel_lost_context(dev);
 
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
batch->DR1, batch->DR4);
if (ret)
return ret;
}
 
if (!IS_I830(dev) && !IS_845G(dev)) {
ret = BEGIN_LP_RING(2);
if (ret)
return ret;
 
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
} else {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
 
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
}
ADVANCE_LP_RING();
}
 
 
if (IS_G4X(dev) || IS_GEN5(dev)) {
if (BEGIN_LP_RING(2) == 0) {
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
 
i915_emit_breadcrumb(dev);
return 0;
}
 
static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
 
if (!master_priv->sarea_priv)
return -EINVAL;
 
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
 
i915_kernel_lost_context(dev);
 
ret = BEGIN_LP_RING(10);
if (ret)
return ret;
 
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
 
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->dri1.current_page == 0) {
OUT_RING(dev_priv->dri1.back_offset);
dev_priv->dri1.current_page = 1;
} else {
OUT_RING(dev_priv->dri1.front_offset);
dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
 
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
 
ADVANCE_LP_RING();
 
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
 
master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
 
static int i915_quiescent(struct drm_device *dev)
{
i915_kernel_lost_context(dev);
return intel_ring_idle(LP_RING(dev->dev_private));
}
 
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
ret = i915_quiescent(dev);
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
drm_i915_sarea_t *sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
master_priv = dev->primary->master->driver_priv;
sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
 
if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
 
DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
if (batch->num_cliprects < 0)
return -EINVAL;
 
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
 
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_free;
}
}
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
 
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
fail_free:
kfree(cliprects);
 
return ret;
}
 
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
drm_i915_sarea_t *sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
int ret;
 
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
master_priv = dev->primary->master->driver_priv;
sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
 
batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
if (batch_data == NULL)
return -ENOMEM;
 
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
if (ret != 0) {
ret = -EFAULT;
goto fail_batch_free;
}
 
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(*cliprects), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
}
 
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_clip_free;
}
}
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
goto fail_clip_free;
}
 
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
fail_clip_free:
kfree(cliprects);
fail_batch_free:
kfree(batch_data);
 
return ret;
}
 
static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
i915_kernel_lost_context(dev);
 
DRM_DEBUG_DRIVER("\n");
 
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
 
return dev_priv->dri1.counter;
}
 
static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
struct intel_engine_cs *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
 
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
}
 
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
if (ring->irq_get(ring)) {
DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
ret = -EBUSY;
 
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
 
return ret;
}
 
/* Needs the lock as it touches the ring.
*/
static int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_emit_t *emit = data;
int result;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
 
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
 
return 0;
}
 
/* Doesn't need the hardware lock.
*/
static int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_irq_wait_t *irqwait = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
return i915_wait_irq(dev, irqwait->irq_seq);
}
 
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
return 0;
}
 
/**
* Schedule buffer swap at given vertical blank.
*/
static int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
/* The delayed swap mechanism was fundamentally racy, and has been
* removed. The model was that the client requested a delayed flip/swap
* from the kernel, then waited for vblank before continuing to perform
* rendering. The problem was that the kernel might wake the client
* up before it dispatched the vblank swap (since the lock has to be
* held while touching the ringbuffer), in which case the client would
* clear and start the next frame before the swap occurred, and
* flicker would occur in addition to likely missing the vblank.
*
* In the absence of this ioctl, userland falls back to a correct path
* of waiting for a vblank, then dispatching the swap on its own.
* Context switching to userland and back is plenty fast enough for
* meeting the requirements of vblank swapping.
*/
return -EINVAL;
}
 
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
DRM_DEBUG_DRIVER("%s\n", __func__);
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_flip(dev);
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
#endif
 
int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
936,21 → 54,12
drm_i915_getparam_t *param = data;
int value;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
value = dev->pdev->device;
break;
1001,7 → 110,7
value = HAS_WT(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
value = USES_PPGTT(dev);
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
1027,6 → 136,9
case I915_PARAM_CMD_PARSER_VERSION:
value = i915_cmd_parser_get_version();
break;
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
value = 1;
break;
default:
DRM_DEBUG("Unknown parameter %d\n", param->param);
return -EINVAL;
1044,19 → 156,13
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
/* Reject all old ums/dri params. */
return -ENODEV;
 
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
1074,8 → 180,6
}
#endif
 
 
 
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1182,14 → 286,7
 
intel_power_domains_init_hw(dev_priv);
 
/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
dev_priv->pm._irqs_disabled = false;
 
ret = drm_irq_install(dev, dev->pdev->irq);
ret = intel_irq_install(dev_priv);
if (ret)
goto cleanup_gem_stolen;
 
1201,7 → 298,6
if (ret)
goto cleanup_irq;
 
 
intel_modeset_gem_init(dev);
 
/* Always safe in the mode setting case. */
1215,7 → 311,7
goto cleanup_gem;
 
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev);
intel_hpd_init(dev_priv);
 
/*
* Some ports require correctly set-up hpd registers for detection to
1227,7 → 323,7
* scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will loose hotplug notifactions.
*/
intel_fbdev_initial_config(dev);
intel_fbdev_initial_config(dev_priv, 0);
 
drm_kms_helper_poll_init(dev);
 
1238,7 → 334,6
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem_stolen:
1251,8 → 346,6
return ret;
}
 
 
 
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
1325,11 → 418,11
 
info = (struct intel_device_info *)&dev_priv->info;
 
if (IS_VALLEYVIEW(dev))
for_each_pipe(pipe)
if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 2;
else
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 1;
 
if (i915.disable_display) {
1388,13 → 481,14
dev->dev_private = dev_priv;
dev_priv->dev = dev;
 
/* copy initial configuration to dev_priv->info */
/* Setup the write-once "constant" device info */
device_info = (struct intel_device_info *)&dev_priv->info;
*device_info = *info;
memcpy(device_info, info, sizeof(dev_priv->info));
device_info->device_id = dev->pdev->device;
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->backlight_lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
1449,8 → 543,6
if (ret)
goto out_regs;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
 
pci_set_master(dev->pdev);
 
1495,7 → 587,7
system_wq = dev_priv->wq;
 
 
intel_irq_init(dev);
intel_irq_init(dev_priv);
intel_uncore_sanitize(dev);
 
/* Try to make sure MCHBAR is enabled before poking at it */
1535,9 → 627,6
DRM_ERROR("failed to init modeset\n");
goto out_power_well;
}
} else {
/* Start out suspended in ums mode. */
dev_priv->ums.mm_suspended = 1;
}
 
 
1549,7 → 638,7
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
 
intel_init_runtime_pm(dev_priv);
intel_runtime_pm_enable(dev_priv);
 
main_device = dev;
 
1580,7 → 669,6
return ret;
}
 
intel_fini_runtime_pm(dev_priv);
 
intel_gpu_ips_teardown();
 
1601,9 → 689,7
acpi_video_unregister();
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
cancel_work_sync(&dev_priv->console_resume_work);
 
/*
* free the memory space allocated for the child device
1636,18 → 722,10
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
 
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
}
 
WARN_ON(!list_empty(&dev_priv->vm_list));
 
drm_vblank_cleanup(dev);
 
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
 
1655,7 → 733,7
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
i915_global_gtt_cleanup(dev);
 
intel_uncore_fini(dev);
if (dev_priv->regs != NULL)
1697,25 → 775,10
*/
void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* On gen6+ we refuse to init without kms enabled, but then the drm core
* goes right around and calls lastclose. Check for this and don't clean
* up anything. */
if (!dev_priv)
return;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
 
i915_gem_lastclose(dev);
 
i915_dma_cleanup(dev);
}
 
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
1722,6 → 785,9
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
intel_modeset_preclose(dev, file);
}
 
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1734,24 → 800,24
}
 
const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
1760,8 → 826,8
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
1798,4 → 864,3
return 1;
}
#endif
 
/drivers/video/drm/i915/i915_drv.c
36,7 → 36,6
 
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>
 
44,8 → 43,7
 
#include <syscall.h>
 
#define __read_mostly
 
#
static struct drm_driver driver;
 
#define GEN_DEFAULT_PIPEOFFSETS \
336,6 → 334,19
CURSOR_OFFSETS,
};
 
static const struct intel_device_info intel_skylake_info = {
.is_preliminary = 1,
.is_skylake = 1,
.gen = 9, .num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
 
/*
* Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem
368,7 → 379,8
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
INTEL_CHV_IDS(&intel_cherryview_info)
INTEL_CHV_IDS(&intel_cherryview_info), \
INTEL_SKL_IDS(&intel_skylake_info)
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_PCI_IDS,
427,7 → 439,7
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_ULT(dev));
WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
438,7 → 450,15
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
WARN_ON(!IS_HSW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev));
} else
continue;
 
459,6 → 479,10
if (i915.semaphores >= 0)
return i915.semaphores;
 
/* TODO: make semaphores and Execlists play nicely together */
if (i915.enable_execlists)
return false;
 
/* Until we get further testing... */
if (IS_GEN8(dev))
return false;
488,7 → 512,11
drm_modeset_unlock_all(dev);
}
 
static int i915_drm_freeze(struct drm_device *dev)
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
 
static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
518,6 → 546,8
return error;
}
 
intel_suspend_gt_powersave(dev);
 
/*
* Disable CRTCs directly since we want to preserve sw state
* for _thaw. Also, power gate the CRTC power wells.
529,14 → 559,12
 
intel_dp_mst_suspend(dev);
 
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
intel_runtime_pm_disable_interrupts(dev_priv);
intel_hpd_cancel_work(dev_priv);
 
intel_runtime_pm_disable_interrupts(dev);
intel_suspend_encoders(dev_priv);
 
intel_suspend_gt_powersave(dev);
 
intel_modeset_suspend_hw(dev);
intel_suspend_hw(dev);
}
 
i915_gem_suspend_gtt_mappings(dev);
553,9 → 581,7
intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
 
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
console_unlock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 
dev_priv->suspend_count++;
 
564,8 → 590,27
return 0;
}
 
int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_drm_suspend_late(struct drm_device *drm_dev)
{
struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret;
 
ret = intel_suspend_complete(dev_priv);
 
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
 
return ret;
}
 
pci_disable_device(drm_dev->pdev);
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
 
return 0;
}
 
int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
int error;
 
if (!dev || !dev->dev_private) {
574,58 → 619,25
return -ENODEV;
}
 
if (state.event == PM_EVENT_PRETHAW)
return 0;
if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
state.event != PM_EVENT_FREEZE))
return -EINVAL;
 
 
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
error = i915_drm_freeze(dev);
error = i915_drm_suspend(dev);
if (error)
return error;
 
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
return i915_drm_suspend_late(dev);
}
 
return 0;
}
 
void intel_console_resume(struct work_struct *work)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
 
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
}
 
static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_disable_pc8(dev_priv);
 
intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
 
return 0;
}
 
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
restore_gtt_mappings) {
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
646,23 → 658,22
}
mutex_unlock(&dev->struct_mutex);
 
intel_runtime_pm_restore_interrupts(dev);
/* We need working interrupts for modeset enabling ... */
intel_runtime_pm_enable_interrupts(dev_priv);
 
intel_modeset_init_hw(dev);
 
{
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
spin_unlock_irq(&dev_priv->irq_lock);
 
intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
 
intel_dp_mst_resume(dev);
 
/*
* ... but also need to make sure that hotplug processing
* doesn't cause havoc. Like in the driver load code we don't
669,7 → 680,7
* bother with the tiny race here where we might lose hotplug
* notifications.
* */
intel_hpd_init(dev);
intel_hpd_init(dev_priv);
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
676,17 → 687,7
 
intel_opregion_init(dev);
 
/*
* The console lock can be pretty contented on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);
}
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
694,22 → 695,16
 
intel_opregion_notify_adapter(dev, PCI_D0);
 
drm_kms_helper_poll_enable(dev);
 
return 0;
}
 
static int i915_drm_thaw(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_check_and_clear_faults(dev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
 
return __i915_drm_thaw(dev, true);
}
 
static int i915_resume_early(struct drm_device *dev)
{
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
/*
* We have a resume ordering issue with the snd-hda driver also
* requiring our device to be powered up. Due to the lack of a
724,35 → 719,36
 
pci_set_master(dev->pdev);
 
return i915_drm_thaw_early(dev);
if (IS_VALLEYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, false);
if (ret)
DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
 
intel_uncore_early_sanitize(dev, true);
 
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
 
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
 
return ret;
}
 
int i915_resume(struct drm_device *dev)
int i915_resume_legacy(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need to restore the GTT mappings since the BIOS might clear
* all our scratch PTEs.
*/
ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
ret = i915_drm_resume_early(dev);
if (ret)
return ret;
 
drm_kms_helper_poll_enable(dev);
return 0;
return i915_drm_resume(dev);
}
 
static int i915_resume_legacy(struct drm_device *dev)
{
i915_resume_early(dev);
i915_resume(dev);
 
return 0;
}
 
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
796,6 → 792,9
}
}
 
if (i915_stop_ring_allow_warn(dev_priv))
pr_notice("drm/i915: Resetting chip after gpu hang\n");
 
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
816,11 → 815,14
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->ums.mm_suspended) {
dev_priv->ums.mm_suspended = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
 
ret = i915_gem_init_hw(dev);
 
dev_priv->gpu_error.reload_in_reset = false;
 
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
841,8 → 843,6
*/
if (INTEL_INFO(dev)->gen > 5)
intel_reset_gt_powersave(dev);
 
intel_hpd_init(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
895,7 → 895,7
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
return i915_drm_freeze(drm_dev);
return i915_drm_suspend(drm_dev);
}
 
static int i915_pm_suspend_late(struct device *dev)
902,7 → 902,6
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = drm_dev->dev_private;
 
/*
* We have a suspend ordering issue with the snd-hda driver also
916,13 → 915,7
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
hsw_enable_pc8(dev_priv);
 
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
 
return 0;
return i915_drm_suspend_late(drm_dev);
}
 
static int i915_pm_resume_early(struct device *dev)
930,7 → 923,10
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_resume_early(drm_dev);
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
return i915_drm_resume_early(drm_dev);
}
 
static int i915_pm_resume(struct device *dev)
938,69 → 934,19
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_resume(drm_dev);
}
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
 
static int i915_pm_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
return i915_drm_resume(drm_dev);
}
 
return i915_drm_freeze(drm_dev);
}
 
static int i915_pm_thaw_early(struct device *dev)
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_thaw_early(drm_dev);
}
 
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_thaw(drm_dev);
}
 
static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
 
return i915_drm_freeze(drm_dev);
}
 
static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
 
return 0;
}
 
static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
intel_init_pch_refclk(dev);
 
return 0;
}
 
static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
hsw_disable_pc8(dev_priv);
 
return 0;
}
 
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
1291,7 → 1237,7
I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
 
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
u32 mask;
int err;
1331,7 → 1277,8
return err;
}
 
static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume)
{
struct drm_device *dev = dev_priv->dev;
int err;
1356,8 → 1303,10
 
vlv_check_no_gt_access(dev_priv);
 
if (rpm_resume) {
intel_init_clock_gating(dev);
i915_gem_restore_fences(dev);
}
 
return ret;
}
1372,7 → 1321,9
if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
return -ENODEV;
 
WARN_ON(!HAS_RUNTIME_PM(dev));
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
 
assert_force_wake_inactive(dev_priv);
 
DRM_DEBUG_KMS("Suspending device\n");
1401,28 → 1352,13
i915_gem_release_all_mmaps(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
/*
* rps.work can't be rearmed here, since we get here only after making
* sure the GPU is idle and the RPS freq is set to the minimum. See
* intel_mark_idle().
*/
cancel_work_sync(&dev_priv->rps.work);
intel_runtime_pm_disable_interrupts(dev);
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
 
if (IS_GEN6(dev)) {
ret = 0;
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_suspend(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_suspend(dev_priv);
} else {
ret = -ENODEV;
WARN_ON(1);
}
 
ret = intel_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_restore_interrupts(dev);
intel_runtime_pm_enable_interrupts(dev_priv);
 
return ret;
}
1431,13 → 1367,29
dev_priv->pm.suspended = true;
 
/*
* FIXME: We really should find a document that references the arguments
* used below!
*/
if (IS_HASWELL(dev)) {
/*
* current versions of firmware which depend on this opregion
* notification have repurposed the D1 definition to mean
* "runtime suspended" vs. what you would normally expect (D3)
* to distinguish it from notifications that might be sent
* via the suspend path.
* to distinguish it from notifications that might be sent via
* the suspend path.
*/
intel_opregion_notify_adapter(dev, PCI_D1);
} else {
/*
* On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
* being detected, and the call we do at intel_runtime_resume()
* won't be able to restore them. Since PCI_D3hot matches the
* actual specification and appears to be working, use it. Let's
* assume the other non-Haswell platforms will stay the same as
* Broadwell.
*/
intel_opregion_notify_adapter(dev, PCI_D3hot);
}
 
DRM_DEBUG_KMS("Device suspended\n");
return 0;
1448,9 → 1400,10
struct pci_dev *pdev = to_pci_dev(device);
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
int ret = 0;
 
WARN_ON(!HAS_RUNTIME_PM(dev));
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
return -ENODEV;
 
DRM_DEBUG_KMS("Resuming device\n");
 
1457,16 → 1410,12
intel_opregion_notify_adapter(dev, PCI_D0);
dev_priv->pm.suspended = false;
 
if (IS_GEN6(dev)) {
ret = snb_runtime_resume(dev_priv);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = hsw_runtime_resume(dev_priv);
} else if (IS_VALLEYVIEW(dev)) {
ret = vlv_runtime_resume(dev_priv);
} else {
WARN_ON(1);
ret = -ENODEV;
}
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
ret = vlv_resume_prepare(dev_priv, true);
 
/*
* No point in rolling back things in case of an error, as the best
1475,8 → 1424,8
i915_gem_init_swizzling(dev);
gen6_update_ring_freq(dev);
 
intel_runtime_pm_restore_interrupts(dev);
intel_reset_gt_powersave(dev);
intel_runtime_pm_enable_interrupts(dev_priv);
intel_enable_gt_powersave(dev);
 
if (ret)
DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1486,17 → 1435,60
return ret;
}
 
/*
* This function implements common functionality of runtime and system
* suspend sequence.
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
int ret;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
 
return ret;
}
 
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
* PMSG_RESUME]
*/
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
.thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
 
/*
* S4 event handlers
* @freeze, @freeze_late : called (1) before creating the
* hibernation image [PMSG_FREEZE] and
* (2) after rebooting, before restoring
* the image [PMSG_QUIESCE]
* @thaw, @thaw_early : called (1) after creating the hibernation
* image, before writing it [PMSG_THAW]
* and (2) after failing to create or
* restore the image [PMSG_RECOVER]
* @poweroff, @poweroff_late: called after writing the hibernation
* image, before rebooting [PMSG_HIBERNATE]
* @restore, @restore_early : called after rebooting and restoring the
* hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_suspend,
.freeze_late = i915_pm_suspend_late,
.thaw_early = i915_pm_resume_early,
.thaw = i915_pm_resume,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_suspend_late,
.restore_early = i915_pm_resume_early,
.restore = i915_pm_resume,
 
/* S0ix (via runtime suspend) event handlers */
.runtime_suspend = intel_runtime_suspend,
.runtime_resume = intel_runtime_resume,
};
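/*
 * Sketch of how these ops are consumed upstream (hypothetical for the
 * KolibriOS port, which may not register a PCI driver at all):
 *
 * static struct pci_driver i915_pci_driver = {
 * .name = DRIVER_NAME,
 * .id_table = pciidlist,
 * .probe = i915_pci_probe,
 * .remove = i915_pci_remove,
 * .driver.pm = &i915_pm_ops,
 * };
 */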
1542,8 → 1534,6
// .resume = i915_resume,
 
// .device_is_agp = i915_driver_device_is_agp,
// .master_create = i915_master_create,
// .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_debugfs_init,
.debugfs_cleanup = i915_debugfs_cleanup,
/drivers/video/drm/i915/i915_drv.h
35,11 → 35,15
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
//#include <linux/backlight.h>
#include <linux/hashtable.h>
 
50,22 → 54,13
/* General customization:
*/
 
#define I915_TILING_NONE 0
 
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
#define VGA_RSRC_LEGACY_MEM 0x02
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO 0x04
#define VGA_RSRC_NORMAL_MEM 0x08
 
#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
 
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20140725"
#define DRIVER_DATE "20141121"
 
#undef WARN_ON
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
 
enum pipe {
INVALID_PIPE = -1,
PIPE_A = 0,
85,6 → 80,14
};
#define transcoder_name(t) ((t) + 'A')
 
/*
* This is the maximum (across all platforms) number of planes (primary +
* sprites) that can be active at the same time on one pipe.
*
* This value doesn't count the cursor plane.
*/
#define I915_MAX_PLANES 3
 
enum plane {
PLANE_A = 0,
PLANE_B,
173,7 → 176,10
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
 
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(pipe, p) \
for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
 
#define for_each_crtc(dev, crtc) \
182,6 → 188,11
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
#define for_each_intel_encoder(dev, intel_encoder) \
list_for_each_entry(intel_encoder, \
&(dev)->mode_config.encoder_list, \
base.head)
 
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
203,27 → 214,52
/* real shared dpll ids must be >= 0 */
DPLL_ID_PCH_PLL_A = 0,
DPLL_ID_PCH_PLL_B = 1,
/* hsw/bdw */
DPLL_ID_WRPLL1 = 0,
DPLL_ID_WRPLL2 = 1,
/* skl */
DPLL_ID_SKL_DPLL1 = 0,
DPLL_ID_SKL_DPLL2 = 1,
DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 2
#define I915_NUM_PLLS 3
 
struct intel_dpll_hw_state {
/* i9xx, pch plls */
uint32_t dpll;
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
 
/* hsw, bdw */
uint32_t wrpll;
 
/* skl */
/*
* DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
* lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
uint32_t ctrl1;
/* HDMI only, 0 when used for DP */
uint32_t cfgcr1, cfgcr2;
};
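/*
 * Illustrative sketch only (the register write is not part of this header):
 * when the shared DPLL_CTRL1 register is written, the per-DPLL bits stored
 * above are shifted into place by the DPLL id, roughly:
 *
 * val &= ~(0x3f << (pll->id * 6));
 * val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
 * I915_WRITE(DPLL_CTRL1, val);
 *
 * Keeping ctrl1 unshifted is what lets two CRTCs' states be compared directly
 * when deciding whether they can share a DPLL.
 */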
 
struct intel_shared_dpll_config {
unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
struct intel_dpll_hw_state hw_state;
};
 
struct intel_shared_dpll {
int refcount; /* count of number of CRTCs sharing this PLL */
struct intel_shared_dpll_config config;
struct intel_shared_dpll_config *new_config;
 
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
const char *name;
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
/* The mode_set hook is optional and should be used together with the
* intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
237,6 → 273,11
struct intel_dpll_hw_state *hw_state);
};
 
#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3
 
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
265,7 → 306,6
#define DRIVER_PATCHLEVEL 0
 
#define WATCH_LISTS 0
#define WATCH_GTT 0
 
struct opregion_header;
struct opregion_acpi;
288,10 → 328,6
struct intel_overlay;
struct intel_overlay_error_state;
 
struct drm_i915_master_private {
drm_local_map_t *sarea;
struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
398,6 → 434,7
pid_t pid;
char comm[TASK_COMM_LEN];
} ring[I915_NUM_RINGS];
 
struct drm_i915_error_buffer {
u32 size;
u32 name;
416,9 → 453,11
} **active_bo, **pinned_bo;
 
u32 *active_bo_count, *pinned_bo_count;
u32 vm_count;
};
 
struct intel_connector;
struct intel_encoder;
struct intel_crtc_config;
struct intel_plane_config;
struct intel_crtc;
445,7 → 484,7
* Returns true on success, false on failure.
*/
bool (*find_dpll)(const struct intel_limit *limit,
struct drm_crtc *crtc,
struct intel_crtc *crtc,
int target, int refclk,
struct dpll *match_clock,
struct dpll *best_clock);
461,15 → 500,14
struct intel_crtc_config *);
void (*get_plane_config)(struct intel_crtc *,
struct intel_plane_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
int (*crtc_compute_clock)(struct intel_crtc *crtc);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc,
void (*audio_codec_enable)(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
487,7 → 525,7
/* display clock increase/decrease */
/* pll clock increase/decrease */
 
int (*setup_backlight)(struct intel_connector *connector);
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get_backlight)(struct intel_connector *connector);
void (*set_backlight)(struct intel_connector *connector,
uint32_t level);
526,6 → 564,7
 
unsigned fw_rendercount;
unsigned fw_mediacount;
unsigned fw_blittercount;
 
struct timer_list force_wake_timer;
};
544,6 → 583,7
func(is_ivybridge) sep \
func(is_valleyview) sep \
func(is_haswell) sep \
func(is_skylake) sep \
func(is_preliminary) sep \
func(has_fbc) sep \
func(has_pipe_cxsr) sep \
561,6 → 601,7
 
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
u8 num_pipes:3;
u8 num_sprites[I915_MAX_PIPES];
u8 gen;
625,13 → 666,22
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
struct i915_hw_ppgtt *ppgtt;
 
/* Legacy ring buffer submission */
struct {
struct drm_i915_gem_object *rcs_state;
bool initialized;
} legacy_hw_ctx;
 
/* Execlists */
bool rcs_initialized;
struct {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int unpin_count;
} engine[I915_NUM_RINGS];
 
struct list_head link;
};
 
645,6 → 695,20
struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
 
bool false_color;
 
/* Tracks whether the HW is actually enabled, not whether the feature is
* possible. */
bool enabled;
 
/* On gen8 some rings cannot perform the fbc clean operation, so for now
* we are doing this on SW with mmio.
* This variable works in the opposite information direction
* of ring->fbc_dirty telling software on frontbuffer tracking
* to perform the cache clean on sw side.
*/
bool need_sw_cache_clean;
 
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
686,6 → 750,7
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_NOP,
};
 
698,6 → 763,8
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
 
struct intel_fbdev;
struct intel_fbc_work;
749,7 → 816,6
u32 saveBLC_HIST_CTL;
u32 saveBLC_PWM_CTL;
u32 saveBLC_PWM_CTL2;
u32 saveBLC_HIST_CTL_B;
u32 saveBLC_CPU_PWM_CTL;
u32 saveBLC_CPU_PWM_CTL2;
u32 saveFPB0;
858,6 → 924,7
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
 
struct vlv_s0ix_state {
928,8 → 995,12
};
 
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
/*
* work, interrupts_enabled and pm_iir are protected by
* dev_priv->irq_lock
*/
struct work_struct work;
bool interrupts_enabled;
u32 pm_iir;
 
/* Frequencies are stored in potentially platform dependent multiples.
1052,31 → 1123,6
struct i915_power_well *power_wells;
};
 
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
 
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
 
uint32_t counter;
};
 
struct i915_ums_state {
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int mm_suspended;
};
 
#define MAX_L3_SLICES 2
struct intel_l3_parity {
u32 *remap_info[MAX_L3_SLICES];
1153,6 → 1199,7
};
 
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
unsigned bytes;
unsigned size;
int err;
1225,6 → 1272,9
 
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
 
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
bool reload_in_reset;
};
 
enum modeset_restore {
1234,6 → 1284,12
};
 
struct ddi_vbt_port_info {
/*
* This is an index in the HDMI/DVI DDI buffer translation table.
* The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
* populate this field.
*/
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
uint8_t hdmi_level_shift;
 
uint8_t supports_dvi:1;
1324,6 → 1380,49
enum intel_ddb_partitioning partitioning;
};
 
struct skl_ddb_entry {
uint16_t start, end; /* in number of blocks, 'end' is exclusive */
};
 
static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
return entry->end - entry->start;
}
 
static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
const struct skl_ddb_entry *e2)
{
if (e1->start == e2->start && e1->end == e2->end)
return true;
 
return false;
}
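/*
 * Usage sketch (names assumed): a pipe's allocation is marked dirty when its
 * DDB entry moved between two skl_ddb_allocation snapshots, e.g.
 *
 * if (!skl_ddb_entry_equal(&old->pipe[pipe], &new->pipe[pipe]))
 * results->dirty[pipe] = true;
 */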
 
struct skl_ddb_allocation {
struct skl_ddb_entry pipe[I915_MAX_PIPES];
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
struct skl_ddb_entry cursor[I915_MAX_PIPES];
};
 
struct skl_wm_values {
bool dirty[I915_MAX_PIPES];
struct skl_ddb_allocation ddb;
uint32_t wm_linetime[I915_MAX_PIPES];
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
uint32_t cursor[I915_MAX_PIPES][8];
uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
uint32_t cursor_trans[I915_MAX_PIPES];
};
 
struct skl_wm_level {
bool plane_en[I915_MAX_PLANES];
bool cursor_en;
uint16_t plane_res_b[I915_MAX_PLANES];
uint8_t plane_res_l[I915_MAX_PLANES];
uint16_t cursor_res_b;
uint8_t cursor_res_l;
};
 
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
1336,7 → 1435,7
*
* Our driver uses the autosuspend delay feature, which means we'll only really
* suspend if we stay with zero refcount for a certain amount of time. The
* default value is currently very conservative (see intel_init_runtime_pm), but
* default value is currently very conservative (see intel_runtime_pm_enable), but
* it can be changed with the standard runtime PM files from sysfs.
*
* The irqs_disabled variable becomes true exactly after we disable the IRQs and
1349,7 → 1448,7
*/
struct i915_runtime_pm {
bool suspended;
bool _irqs_disabled;
bool irqs_enabled;
};
 
enum intel_pipe_crc_source {
1393,6 → 1492,20
unsigned flip_bits;
};
 
struct i915_wa_reg {
u32 addr;
u32 value;
/* bitmask representing WA bits */
u32 mask;
};
 
#define I915_MAX_WA_REGS 16
 
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
};
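/*
 * Sketch (loosely after the upstream intel_ring_workarounds_emit()): the
 * recorded registers are replayed with a single LRI burst after ring
 * init/reset, e.g.
 *
 * intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(dev_priv->workarounds.count));
 * for (i = 0; i < dev_priv->workarounds.count; i++) {
 * intel_ring_emit(ring, dev_priv->workarounds.reg[i].addr);
 * intel_ring_emit(ring, dev_priv->workarounds.reg[i].value);
 * }
 * intel_ring_emit(ring, MI_NOOP);
 */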
 
struct drm_i915_private {
struct drm_device *dev;
 
1426,7 → 1539,7
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
 
drm_dma_handle_t *status_page_dmah;
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
 
/* protects the irq masks */
1471,15 → 1584,20
struct intel_opregion opregion;
struct intel_vbt_data vbt;
 
bool preserve_bios_swizzle;
 
/* overlay */
struct intel_overlay *overlay;
 
/* backlight registers and fields in struct intel_panel */
spinlock_t backlight_lock;
struct mutex backlight_lock;
 
/* LVDS info */
bool no_aux_handshake;
 
/* protects panel power sequencer state */
struct mutex pps_mutex;
 
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1486,6 → 1604,7
 
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int vlv_cdclk_freq;
unsigned int hpll_freq;
 
/**
* wq - Driver workqueue for GEM.
1531,6 → 1650,8
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
struct i915_workarounds workarounds;
 
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
1566,14 → 1687,9
#ifdef CONFIG_DRM_I915_FBDEV
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
struct work_struct fbdev_suspend_work;
#endif
 
/*
* The console may be contended at resume, but we don't
* want it to block on it.
*/
struct work_struct console_resume_work;
 
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
 
1598,9 → 1714,25
uint16_t spr_latency[5];
/* cursor */
uint16_t cur_latency[5];
/*
* Raw watermark memory latency values
* for SKL for all 8 levels
* in 1us units.
*/
uint16_t skl_latency[8];
 
/*
* The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store
* intermediate results here instead.
*/
struct skl_wm_values skl_results;
 
/* current hardware state */
union {
struct ilk_wm_values hw;
struct skl_wm_values skl_hw;
};
} wm;
 
struct i915_runtime_pm pm;
1619,12 → 1751,22
*/
struct workqueue_struct *dp_wq;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
/* Old ums support infrastructure, same warning applies. */
struct i915_ums_state ums;
uint32_t bios_vgacntr;
 
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
int (*init_rings)(struct drm_device *dev);
void (*cleanup_ring)(struct intel_engine_cs *ring);
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
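/*
 * Sketch of how these hooks get filled in (taken from the upstream
 * i915_gem_init() of this kernel generation; the exact call site in the
 * KolibriOS port may differ):
 *
 * if (!i915.enable_execlists) {
 * dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
 * dev_priv->gt.init_rings = i915_gem_init_rings;
 * dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
 * dev_priv->gt.stop_ring = intel_stop_ring_buffer;
 * } else {
 * dev_priv->gt.do_execbuf = intel_execlists_submission;
 * dev_priv->gt.init_rings = intel_logical_rings_init;
 * dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
 * dev_priv->gt.stop_ring = intel_logical_ring_stop;
 * }
 */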
 
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
1766,17 → 1908,8
* Only honoured if hardware has relevant pte bit
*/
unsigned long gt_ro:1;
 
/*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
unsigned int fenced_gpu_access:1;
 
unsigned int cache_level:3;
 
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
 
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
1809,10 → 1942,10
unsigned long user_pin_count;
struct drm_file *pin_filp;
 
union {
/** for phy allocated objects */
drm_dma_handle_t *phys_handle;
struct drm_dma_handle *phys_handle;
 
union {
struct i915_gem_userptr {
uintptr_t ptr;
unsigned read_only :1;
1976,51 → 2109,65
int count;
};
 
#define INTEL_INFO(dev) (&to_i915(dev)->info)
/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
#define __I915__(p) ({ \
struct drm_i915_private *__p; \
if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
__p = (struct drm_i915_private *)p; \
else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
__p = to_i915((struct drm_device *)p); \
else \
BUILD_BUG(); \
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
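/*
 * Usage sketch (hypothetical helper, not in the tree): the type dispatch in
 * __I915__() lets the same macro take either pointer type:
 *
 * static inline bool example_has_llc(struct drm_device *dev,
 * struct drm_i915_private *dev_priv)
 * {
 * return INTEL_INFO(dev)->has_llc && INTEL_INFO(dev_priv)->has_llc;
 * }
 */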
 
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592)
#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001)
#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
(dev)->pdev->device == 0x0152 || \
(dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
(dev)->pdev->device == 0x0106 || \
(dev)->pdev->device == 0x010A)
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \
INTEL_DEVID(dev) == 0x0152 || \
INTEL_DEVID(dev) == 0x015a)
#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \
INTEL_DEVID(dev) == 0x0106 || \
INTEL_DEVID(dev) == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
(((dev)->pdev->device & 0xf) == 0x2 || \
((dev)->pdev->device & 0xf) == 0x6 || \
((dev)->pdev->device & 0xf) == 0xe))
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \
(dev)->pdev->device == 0x0A1E)
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \
INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
/*
2036,6 → 2183,7
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
 
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
2048,14 → 2196,13
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
to_i915(dev)->ellc_size)
__I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
 
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
2086,7 → 2233,7
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
 
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
2093,6 → 2240,8
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2100,8 → 2249,11
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
 
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
2135,6 → 2287,7
int enable_rc6;
int enable_fbc;
int enable_ppgtt;
int enable_execlists;
int enable_psr;
unsigned int preliminary_hw_support;
int disable_power_well;
2154,8 → 2307,6
extern struct i915_params i915 __read_mostly;
 
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
2169,9 → 2320,6
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
2181,8 → 2329,6
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
extern void intel_console_resume(struct work_struct *work);
 
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
2189,10 → 2335,10
void i915_handle_error(struct drm_device *dev, bool wedged,
const char *fmt, ...);
 
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
int new_delay);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
2212,10 → 2358,19
 
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), 0)
 
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
2230,6 → 2385,20
struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring);
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct drm_i915_gem_object *obj);
int i915_gem_ringbuffer_submission(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
2248,10 → 2417,6
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
2264,6 → 2429,12
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
long target,
unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
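/*
 * Typical caller sketch (mirrors the upstream i915_gem_shrink_all()): reclaim
 * as much as possible, bound or unbound:
 *
 * i915_gem_shrink(dev_priv, LONG_MAX,
 * I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 */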
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
2288,7 → 2459,6
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
 
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
int *needs_clflush);
2382,6 → 2552,7
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
2394,6 → 2565,11
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv);
int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
2452,7 → 2628,7
}
 
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
2461,21 → 2637,30
return vm == ggtt;
}
 
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
WARN_ON(i915_is_ggtt(vm));
 
return container_of(vm, struct i915_hw_ppgtt, base);
}
 
 
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
}
 
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
}
 
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_size(obj, obj_to_ggtt(obj));
return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}
 
static inline int __must_check
2483,7 → 2668,8
uint32_t alignment,
unsigned flags)
{
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
alignment, flags | PIN_GLOBAL);
}
 
static inline int
2495,7 → 2681,6
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 
/* i915_gem_context.c */
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
2507,6 → 2692,8
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
kref_get(&ctx->ref);
2527,8 → 2714,6
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
/* i915_gem_render_state.c */
int i915_gem_render_state_init(struct intel_engine_cs *ring);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
struct i915_address_space *vm,
2596,6 → 2781,7
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
struct drm_i915_private *i915,
size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
struct drm_i915_error_state_buf *eb)
2610,7 → 2796,7
void i915_destroy_error_state(struct drm_device *dev);
 
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
2653,7 → 2839,6
extern void intel_i2c_reset(struct drm_device *dev);
 
/* intel_opregion.c */
struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
2691,7 → 2876,6
 
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
2702,6 → 2886,7
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
2741,8 → 2926,8
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
 
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
2772,7 → 2957,9
 
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
#define FORCEWAKE_BLITTER (1 << 2)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
FORCEWAKE_BLITTER)
 
 
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
2838,6 → 3025,11
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
 
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}
 
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
2905,4 → 3097,36
#define ioread32(addr) readl(addr)
 
 
static inline int pm_runtime_get_sync(struct device *dev)
{
return 0;
}
 
static inline int pm_runtime_set_active(struct device *dev)
{
return 0;
}
 
static inline void pm_runtime_disable(struct device *dev)
{
 
}
 
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return 0;
}
 
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
 
#endif
/drivers/video/drm/i915/i915_gem.c
73,10 → 73,6
unsigned long len, unsigned long prot,
unsigned long flag, unsigned long offset);
 
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
}
 
#define MAX_ERRNO 4095
 
98,7 → 94,7
struct drm_i915_fence_reg *fence,
bool enable);
 
static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
static bool cpu_cache_is_coherent(struct drm_device *dev,
199,38 → 195,7
return i915_gem_obj_bound_any(obj) && !obj->active;
}
 
 
#if 0
 
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
 
/* GEM with user mode setting was never supported on ilk and later. */
if (INTEL_INFO(dev)->gen >= 5)
return -ENODEV;
 
mutex_lock(&dev->struct_mutex);
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end);
dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
 
return 0;
}
#endif
 
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
690,12 → 655,12
bool needs_clflush_after)
{
char *vaddr;
int ret = 0;
int ret;
 
if (unlikely(page_do_bit17_swizzling))
return -EINVAL;
 
vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
vaddr = kmap_atomic(page);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
705,7 → 670,7
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
FreeKernelSpace(vaddr);
kunmap_atomic(vaddr);
 
return ret ? -EFAULT : 0;
}
842,11 → 807,10
 
mutex_lock(&dev->struct_mutex);
 
next_page:
 
if (ret)
goto out;
 
next_page:
remain -= page_length;
user_data += page_length;
offset += page_length;
925,11 → 889,6
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
// if (obj->phys_obj) {
// ret = i915_gem_phys_pwrite(dev, obj, args, file);
// goto out;
// }
 
if (obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
1007,7 → 966,7
}
 
/**
* __wait_seqno - wait until execution of seqno has finished
* __i915_wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
1024,7 → 983,7
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
1045,7 → 1004,8
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
 
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
timeout_expire = timeout ?
jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
 
if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
1122,6 → 1082,7
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
unsigned reset_counter;
int ret;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1135,14 → 1096,13
if (ret)
return ret;
 
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
NULL, NULL);
}
 
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
if (!obj->active)
return 0;
1179,7 → 1139,7
if (ret)
return ret;
 
return i915_gem_object_wait_rendering__tail(obj, ring);
return i915_gem_object_wait_rendering__tail(obj);
}
 
/* A nonblocking variant of the above wait. This is a highly dangerous routine
1214,12 → 1174,13
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
file_priv);
mutex_lock(&dev->struct_mutex);
if (ret)
return ret;
 
return i915_gem_object_wait_rendering__tail(obj, ring);
return i915_gem_object_wait_rendering__tail(obj);
}
 
/**
1326,6 → 1287,16
*
* While the mapping holds a reference on the contents of the object, it doesn't
* imply a ref on the object itself.
*
* IMPORTANT:
*
* DRM driver writers who look at this function as an example of how to do GEM
* mmap support, please don't implement mmap support like here. The modern way
* to implement DRM mmap support is with an mmap offset ioctl (like
* i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
* That way debug tooling like valgrind will understand what's going on; hiding
* the mmap call in a driver private ioctl will break that. The i915 driver only
* does cpu mmaps this way because we didn't know better.
*/
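The note above recommends the mmap-offset path. For reference, a hedged Linux userspace sketch of that path (libdrm's drmIoctl() plus a plain mmap() on the DRM fd; KolibriOS does not expose this interface, and fd/handle/size are assumed to come from an earlier open()/DRM_IOCTL_I915_GEM_CREATE):

/* Sketch only: map a GEM object through the GTT on a Linux DRM fd. */
static void *map_gem_through_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return NULL;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}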
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1693,7 → 1664,14
goto err_pages;
 
}
 
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
st->nents++;
sg_set_page(sg, page, PAGE_SIZE, 0);
sg = sg_next(sg);
continue;
}
#endif
if (!i || page_to_pfn(page) != last_pfn + 1) {
if (i)
sg = sg_next(sg);
1704,7 → 1682,9
}
last_pfn = page_to_pfn(page);
}
 
#ifdef CONFIG_SWIOTLB
if (!swiotlb_nr_tbl())
#endif
sg_mark_end(sg);
obj->pages = st;
 
1756,8 → 1736,6
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
 
BUG_ON(ring == NULL);
1776,20 → 1754,7
list_move_tail(&obj->ring_list, &ring->active_list);
 
obj->last_read_seqno = seqno;
 
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
 
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_fence_reg *reg;
 
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
}
}
 
void i915_vma_move_to_active(struct i915_vma *vma,
struct intel_engine_cs *ring)
1814,6 → 1779,8
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
 
intel_fb_obj_flush(obj, true);
 
list_del_init(&obj->ring_list);
obj->ring = NULL;
 
1822,7 → 1789,6
obj->base.write_domain = 0;
 
obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
 
obj->active = 0;
drm_gem_object_unreference(&obj->base);
1920,10 → 1886,21
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
u32 request_ring_position, request_start;
int ret;
 
request_start = intel_ring_get_tail(ring->buffer);
request = ring->preallocated_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
 
if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx;
ringbuf = ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
 
request_start = intel_ring_get_tail(ringbuf);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
1931,24 → 1908,32
* is that the flush _must_ happen before the next request, no matter
* what.
*/
if (i915.enable_execlists) {
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
} else {
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
}
 
request = ring->preallocated_lazy_request;
if (WARN_ON(request == NULL))
return -ENOMEM;
 
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
request_ring_position = intel_ring_get_tail(ring->buffer);
request_ring_position = intel_ring_get_tail(ringbuf);
 
if (i915.enable_execlists) {
ret = ring->emit_request(ringbuf);
if (ret)
return ret;
} else {
ret = ring->add_request(ring);
if (ret)
return ret;
}
 
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
1963,6 → 1948,7
*/
request->batch_obj = obj;
 
if (!i915.enable_execlists) {
/* Hold a reference to the current context so that we can inspect
* it later in case a hangcheck error event fires.
*/
1969,6 → 1955,7
request->ctx = ring->last_context;
if (request->ctx)
i915_gem_context_reference(request->ctx);
}
 
request->emitted_jiffies = jiffies;
list_add_tail(&request->list, &ring->request_list);
1988,7 → 1975,6
ring->outstanding_lazy_seqno = 0;
ring->preallocated_lazy_request = NULL;
 
if (!dev_priv->ums.mm_suspended) {
// i915_queue_hangcheck(ring->dev);
 
queue_delayed_work(dev_priv->wq,
1995,7 → 1981,6
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
 
if (out_seqno)
*out_seqno = request->seqno;
2062,12 → 2047,20
 
static void i915_gem_free_request(struct drm_i915_gem_request *request)
{
struct intel_context *ctx = request->ctx;
 
list_del(&request->list);
i915_gem_request_remove_from_client(request);
 
if (request->ctx)
i915_gem_context_unreference(request->ctx);
if (ctx) {
if (i915.enable_execlists) {
struct intel_engine_cs *ring = request->ring;
 
if (ctx != ring->default_context)
intel_lr_context_unpin(ring, ctx);
}
i915_gem_context_unreference(ctx);
}
kfree(request);
}
 
2122,6 → 2115,23
}
 
/*
* Clear the execlists queue up before freeing the requests, as those
* are the ones that keep the context and ringbuffer backing objects
* pinned in place.
*/
while (!list_empty(&ring->execlist_queue)) {
struct intel_ctx_submit_request *submit_req;
 
submit_req = list_first_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
execlist_link);
list_del(&submit_req->execlist_link);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(submit_req->ctx);
kfree(submit_req);
}
 
/*
* We must free the requests after all the corresponding objects have
* been moved off active lists. Which is the same order as the normal
* retire_requests function does. This is important if object hold
2222,6 → 2232,7
 
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
 
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
2231,12 → 2242,24
break;
 
trace_i915_gem_request_retire(ring, request->seqno);
 
/* This is one of the few common intersection points
* between legacy ringbuffer submission and execlists:
* we need to tell them apart in order to find the correct
* ringbuffer to which the request belongs.
*/
if (i915.enable_execlists) {
struct intel_context *ctx = request->ctx;
ringbuf = ctx->engine[ring->id].ringbuf;
} else
ringbuf = ring->buffer;
 
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of tail of the request to update the last known position
* of the GPU head.
*/
ring->buffer->last_retired_head = request->tail;
ringbuf->last_retired_head = request->tail;
 
i915_gem_free_request(request);
}
2261,7 → 2284,16
for_each_ring(ring, dev_priv, i) {
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
unsigned long flags;
 
spin_lock_irqsave(&ring->execlist_lock, flags);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
intel_execlists_retire_requests(ring);
}
}
 
if (idle)
mod_delayed_work(dev_priv->wq,
2353,6 → 2385,9
u32 seqno = 0;
int ret = 0;
 
if (args->flags != 0)
return -EINVAL;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
2388,8 → 2423,8
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
 
return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
file->driver_priv);
return __i915_wait_seqno(ring, seqno, reset_counter, true,
&args->timeout_ns, file->driver_priv);
 
out:
drm_gem_object_unreference(&obj->base);
2501,6 → 2536,9
* cause memory corruption through use-after-free.
*/
 
/* Throw away the active reference before moving to the unbound list */
i915_gem_object_retire(obj);
 
if (i915_is_ggtt(vma->vm)) {
i915_gem_object_finish_gtt(obj);
 
2515,9 → 2553,8
vma->unbind_vma(vma);
 
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
obj->map_and_fenceable = true;
obj->map_and_fenceable = false;
 
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
2546,9 → 2583,11
 
/* Flush everything onto the inactive list. */
for_each_ring(ring, dev_priv, i) {
if (!i915.enable_execlists) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
return ret;
}
 
ret = intel_ring_idle(ring);
if (ret)
2707,6 → 2746,7
obj->stride, obj->tiling_mode);
 
switch (INTEL_INFO(dev)->gen) {
case 9:
case 8:
case 7:
case 6:
2762,7 → 2802,6
obj->last_fenced_seqno = 0;
}
 
obj->fenced_gpu_access = false;
return 0;
}
 
2869,6 → 2908,9
return 0;
}
} else if (enable) {
if (WARN_ON(!obj->map_and_fenceable))
return -EINVAL;
 
reg = i915_find_fence_reg(dev);
if (IS_ERR(reg))
return PTR_ERR(reg);
2890,17 → 2932,20
return 0;
}
 
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
struct drm_mm_node *gtt_space,
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
unsigned long cache_level)
{
struct drm_mm_node *gtt_space = &vma->node;
struct drm_mm_node *other;
 
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
* crossing memory domains and dying.
/*
* On some machines we have to be careful when putting differing types
* of snoopable memory together to avoid the prefetcher crossing memory
* domains and dying. During vm initialisation, we decide whether or not
* these constraints apply and set the drm_mm.color_adjust
* appropriately.
*/
if (HAS_LLC(dev))
if (vma->vm->mm.color_adjust == NULL)
return true;
 
if (!drm_mm_node_allocated(gtt_space))
2920,46 → 2965,6
return true;
}
 
static void i915_gem_verify_gtt(struct drm_device *dev)
{
#if WATCH_GTT
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int err = 0;
 
list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
continue;
}
 
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
i915_gem_obj_ggtt_offset(obj),
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
continue;
}
 
if (!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
i915_gem_obj_ggtt_offset(obj),
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
}
}
 
WARN_ON(err);
#endif
}
 
/**
* Finds free space in the GTT aperture and binds the object there.
*/
3032,8 → 3037,7
 
goto err_free_vma;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
obj->cache_level))) {
if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
ret = -EINVAL;
goto err_remove_node;
}
3045,25 → 3049,10
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
 
if (i915_is_ggtt(vm)) {
bool mappable, fenceable;
 
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
 
mappable = (vma->node.start + obj->base.size <=
dev_priv->gtt.mappable_end);
 
obj->map_and_fenceable = mappable && fenceable;
}
 
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
trace_i915_vma_bind(vma, flags);
vma->bind_vma(vma, obj->cache_level,
flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
i915_gem_verify_gtt(dev);
return vma;
 
err_remove_node:
3091,7 → 3080,7
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
if (obj->stolen || obj->phys_handle)
return false;
 
/* If the GPU is snooping the contents of the CPU cache,
3133,6 → 3122,8
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
intel_fb_obj_flush(obj, false);
 
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
3154,6 → 3145,8
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
intel_fb_obj_flush(obj, false);
 
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
3169,11 → 3162,12
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
 
/* Not valid to be called on unbound objects. */
if (!i915_gem_obj_bound_any(obj))
if (vma == NULL)
return -EINVAL;
 
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3207,19 → 3201,18
obj->dirty = 1;
}
 
if (write)
intel_fb_obj_invalidate(obj, NULL);
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
 
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj)) {
struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
if (vma)
if (i915_gem_object_is_inactive(obj))
list_move_tail(&vma->mm_list,
&dev_priv->gtt.base.inactive_list);
 
}
 
return 0;
}
 
3239,7 → 3232,7
}
 
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
if (!i915_gem_valid_gtt_space(vma, cache_level)) {
ret = i915_vma_unbind(vma);
if (ret)
return ret;
3266,7 → 3259,7
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
vma->bind_vma(vma, cache_level,
obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
vma->bound & GLOBAL_BIND);
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
3296,7 → 3289,6
old_write_domain);
}
 
i915_gem_verify_gtt(dev);
return 0;
}
 
3382,9 → 3374,6
{
struct i915_vma *vma;
 
if (list_empty(&obj->vma_list))
return false;
 
vma = i915_gem_obj_to_ggtt(obj);
if (!vma)
return false;
3543,6 → 3532,9
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
if (write)
intel_fb_obj_invalidate(obj, NULL);
 
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
3594,7 → 3586,7
if (seqno == 0)
return 0;
 
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
3628,6 → 3620,7
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
unsigned bound;
int ret;
 
if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
3636,6 → 3629,9
if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
return -EINVAL;
 
if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
return -EINVAL;
 
vma = i915_gem_obj_to_vma(obj, vm);
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3657,6 → 3653,7
}
}
 
bound = vma ? vma->bound : 0;
if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
if (IS_ERR(vma))
3663,9 → 3660,32
return PTR_ERR(vma);
}
 
if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
if ((bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
 
fence_size = i915_gem_get_gtt_size(obj->base.dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
obj->base.size,
obj->tiling_mode,
true);
 
fenceable = (vma->node.size == fence_size &&
(vma->node.start & (fence_alignment - 1)) == 0);
 
mappable = (vma->node.start + obj->base.size <=
dev_priv->gtt.mappable_end);
 
obj->map_and_fenceable = mappable && fenceable;
}
 
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
vma->pin_count++;
if (flags & PIN_MAPPABLE)
obj->pin_mappable |= true;
3720,7 → 3740,7
struct drm_i915_gem_object *obj;
int ret;
 
if (INTEL_INFO(dev)->gen >= 6)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
3776,6 → 3796,9
struct drm_i915_gem_object *obj;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
3855,6 → 3878,7
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
3882,6 → 3906,15
goto out;
}
 
if (obj->pages &&
obj->tiling_mode != I915_TILING_NONE &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->madv == I915_MADV_WILLNEED)
i915_gem_object_unpin_pages(obj);
if (args->madv == I915_MADV_WILLNEED)
i915_gem_object_pin_pages(obj);
}
 
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
 
3911,8 → 3944,6
 
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true;
 
i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
4001,6 → 4032,11
 
WARN_ON(obj->frontbuffer_bits);
 
if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
 
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
4037,6 → 4073,7
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
 
/* Keep the vma as a placeholder in the execbuffer reservation lists */
4043,6 → 4080,11
if (!list_empty(&vma->exec_list))
return;
 
vm = vma->vm;
 
if (!i915_is_ggtt(vm))
i915_ppgtt_put(i915_vm_to_ppgtt(vm));
 
list_del(&vma->vma_link);
 
kfree(vma);
4056,9 → 4098,6
int ret = 0;
 
mutex_lock(&dev->struct_mutex);
if (dev_priv->ums.mm_suspended)
goto err;
 
ret = i915_gpu_idle(dev);
if (ret)
goto err;
4069,15 → 4108,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
 
i915_kernel_lost_context(dev);
i915_gem_stop_ringbuffers(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound ums.mm_suspended!
*/
dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
DRIVER_MODESET);
mutex_unlock(&dev->struct_mutex);
 
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4164,11 → 4195,46
return true;
}
 
static int i915_gem_init_rings(struct drm_device *dev)
static void init_unused_ring(struct drm_device *dev, u32 base)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(RING_CTL(base), 0);
I915_WRITE(RING_HEAD(base), 0);
I915_WRITE(RING_TAIL(base), 0);
I915_WRITE(RING_START(base), 0);
}
 
static void init_unused_rings(struct drm_device *dev)
{
if (IS_I830(dev)) {
init_unused_ring(dev, PRB1_BASE);
init_unused_ring(dev, SRB0_BASE);
init_unused_ring(dev, SRB1_BASE);
init_unused_ring(dev, SRB2_BASE);
init_unused_ring(dev, SRB3_BASE);
} else if (IS_GEN2(dev)) {
init_unused_ring(dev, SRB0_BASE);
init_unused_ring(dev, SRB1_BASE);
} else if (IS_GEN3(dev)) {
init_unused_ring(dev, PRB1_BASE);
init_unused_ring(dev, PRB2_BASE);
}
}
 
int i915_gem_init_rings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/*
* At least 830 can leave some of the unused rings
* "active" (ie. head != tail) after resume which
* will prevent c3 entry. Make sure all unused rings
* are totally idle.
*/
init_unused_rings(dev);
 
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
4247,7 → 4313,7
 
i915_gem_init_swizzling(dev);
 
ret = i915_gem_init_rings(dev);
ret = dev_priv->gt.init_rings(dev);
if (ret)
return ret;
 
4265,8 → 4331,16
if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
 
return ret;
}
 
ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
}
 
return ret;
}
 
4275,6 → 4349,9
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
i915.enable_execlists = intel_sanitize_enable_execlists(dev,
i915.enable_execlists);
 
mutex_lock(&dev->struct_mutex);
 
if (IS_VALLEYVIEW(dev)) {
4285,6 → 4362,24
DRM_DEBUG_DRIVER("allow wake ack timed out\n");
}
 
if (!i915.enable_execlists) {
dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
dev_priv->gt.init_rings = i915_gem_init_rings;
dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
dev_priv->gt.stop_ring = intel_stop_ring_buffer;
} else {
dev_priv->gt.do_execbuf = intel_execlists_submission;
dev_priv->gt.init_rings = intel_logical_rings_init;
dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
dev_priv->gt.stop_ring = intel_logical_ring_stop;
}
 
// ret = i915_gem_init_userptr(dev);
// if (ret) {
// mutex_unlock(&dev->struct_mutex);
// return ret;
// }
 
i915_gem_init_global_gtt(dev);
 
ret = i915_gem_context_init(dev);
4316,80 → 4411,9
int i;
 
for_each_ring(ring, dev_priv, i)
intel_cleanup_ring_buffer(ring);
dev_priv->gt.cleanup_ring(ring);
}
 
#if 0
 
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
 
mutex_lock(&dev->struct_mutex);
dev_priv->ums.mm_suspended = 0;
 
ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
 
ret = drm_irq_install(dev, dev->pdev->irq);
if (ret)
goto cleanup_ringbuffer;
mutex_unlock(&dev->struct_mutex);
 
return 0;
 
cleanup_ringbuffer:
i915_gem_cleanup_ringbuffer(dev);
dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
mutex_lock(&dev->struct_mutex);
drm_irq_uninstall(dev);
mutex_unlock(&dev->struct_mutex);
 
return i915_gem_suspend(dev);
}
 
void
i915_gem_lastclose(struct drm_device *dev)
{
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
ret = i915_gem_suspend(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
}
#endif
 
static void
init_ring_lists(struct intel_engine_cs *ring)
{
4433,7 → 4457,7
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
4440,6 → 4464,10
 
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
 
if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
dev_priv->num_fence_regs = 32;
else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4485,6 → 4513,15
return ret;
}
 
/**
* i915_gem_track_fb - update frontbuffer tracking
* old: current GEM buffer for the frontbuffer slots
* new: new GEM buffer for the frontbuffer slots
* frontbuffer_bits: bitmask of frontbuffer slots
*
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
* from @old and setting them in @new. Both @old and @new can be NULL.
*/
void i915_gem_track_fb(struct drm_i915_gem_object *old,
struct drm_i915_gem_object *new,
unsigned frontbuffer_bits)
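The body of i915_gem_track_fb() is not shown in this hunk; a minimal sketch of what the update described above amounts to, assuming the obj->frontbuffer_bits field used elsewhere in this file (not the verbatim driver code, which also carries locking assertions):

if (old)
	old->frontbuffer_bits &= ~frontbuffer_bits;	/* slots no longer backed by @old */
if (new)
	new->frontbuffer_bits |= frontbuffer_bits;	/* slots now backed by @new */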
4522,9 → 4559,7
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (!dev_priv->mm.aliasing_ppgtt ||
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
4565,9 → 4600,7
struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
 
if (!dev_priv->mm.aliasing_ppgtt ||
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
BUG_ON(list_empty(&o->vma_list));
 
4584,14 → 4617,8
{
struct i915_vma *vma;
 
/* This WARN has probably outlived its usefulness (callers already
* WARN if they don't find the GGTT vma they expect). When removing,
* remember to remove the pre-check in is_pin_display() as well */
if (WARN_ON(list_empty(&obj->vma_list)))
return NULL;
 
vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
if (vma->vm != obj_to_ggtt(obj))
if (vma->vm != i915_obj_to_ggtt(obj))
return NULL;
 
return vma;
/drivers/video/drm/i915/i915_gem_context.c
88,6 → 88,7
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
 
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
96,50 → 97,6
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096
 
static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &ppgtt->base;
 
if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
(list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
ppgtt->base.cleanup(&ppgtt->base);
return;
}
 
/*
* Make sure vmas are unbound before we take down the drm_mm
*
* FIXME: Proper refcounting should take care of this, this shouldn't be
* needed at all.
*/
if (!list_empty(&vm->active_list)) {
struct i915_vma *vma;
 
list_for_each_entry(vma, &vm->active_list, mm_list)
if (WARN_ON(list_empty(&vma->vma_link) ||
list_is_singular(&vma->vma_link)))
break;
 
i915_gem_evict_vm(&ppgtt->base, true);
} else {
i915_gem_retire_requests(dev);
i915_gem_evict_vm(&ppgtt->base, false);
}
 
ppgtt->base.cleanup(&ppgtt->base);
}
 
static void ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
 
do_ppgtt_cleanup(ppgtt);
kfree(ppgtt);
}
 
static size_t get_context_alignment(struct drm_device *dev)
{
if (IS_GEN6(dev))
180,16 → 137,14
{
struct intel_context *ctx = container_of(ctx_ref,
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
 
if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
}
trace_i915_context_free(ctx);
 
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
if (i915.enable_execlists)
intel_lr_context_free(ctx);
 
i915_ppgtt_put(ctx->ppgtt);
 
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
196,7 → 151,7
kfree(ctx);
}
 
static struct drm_i915_gem_object *
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
struct drm_i915_gem_object *obj;
226,26 → 181,6
return obj;
}
 
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
 
ret = i915_gem_init_ppgtt(dev, ppgtt);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
 
ppgtt->ctx = ctx;
return ppgtt;
}
 
static struct intel_context *
__create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
301,11 → 236,9
*/
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv,
bool create_vm)
struct drm_i915_file_private *file_priv)
{
const bool is_global_default_ctx = file_priv == NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_context *ctx;
int ret = 0;
 
331,8 → 264,8
}
}
 
if (create_vm) {
struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
if (USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
if (IS_ERR_OR_NULL(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
339,27 → 272,13
PTR_ERR(ppgtt));
ret = PTR_ERR(ppgtt);
goto err_unpin;
} else
ctx->vm = &ppgtt->base;
 
/* This case is reserved for the global default context and
* should only happen once. */
if (is_global_default_ctx) {
if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
ret = -EEXIST;
goto err_unpin;
}
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
ctx->ppgtt = ppgtt;
}
} else if (USES_PPGTT(dev)) {
/* For platforms which only have aliasing PPGTT, we fake the
* address space and refcounting. */
ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
} else
ctx->vm = &dev_priv->gtt.base;
 
trace_i915_context_create(ctx);
 
return ctx;
 
err_unpin:
375,36 → 294,25
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Prevent the hardware from restoring the last context (which hung) on
* the next switch */
/* In execlists mode we will unreference the context when the execlist
* queue is cleared and the requests destroyed.
*/
if (i915.enable_execlists)
return;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
struct intel_context *lctx = ring->last_context;
 
/* Do a fake switch to the default context */
if (lctx == dctx)
continue;
 
if (!lctx)
continue;
 
if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
dctx->legacy_hw_ctx.rcs_state->active = 0;
}
 
if (lctx) {
if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
ring->last_context = NULL;
}
}
}
 
int i915_gem_context_init(struct drm_device *dev)
{
417,7 → 325,11
if (WARN_ON(dev_priv->ring[RCS].default_context))
return 0;
 
if (HAS_HW_CONTEXTS(dev)) {
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
426,7 → 338,7
}
}
 
ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
ctx = i915_gem_create_context(dev, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
433,11 → 345,16
return PTR_ERR(ctx);
}
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
 
/* NB: RCS will hold a ref for all rings */
for (i = 0; i < I915_NUM_RINGS; i++)
dev_priv->ring[i].default_context = ctx;
ring->default_context = ctx;
}
 
DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
return 0;
}
 
489,19 → 406,11
struct intel_engine_cs *ring;
int ret, i;
 
/* This is the only place the aliasing PPGTT gets enabled, which means
* it has to happen before we bail on reset */
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
ppgtt->enable(ppgtt);
}
BUG_ON(!dev_priv->ring[RCS].default_context);
 
/* FIXME: We should make this work, even in reset */
if (i915_reset_in_progress(&dev_priv->gpu_error))
if (i915.enable_execlists)
return 0;
 
BUG_ON(!dev_priv->ring[RCS].default_context);
 
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, ring->default_context);
if (ret)
527,7 → 436,7
idr_init(&file_priv->context_idr);
 
mutex_lock(&dev->struct_mutex);
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
 
if (IS_ERR(ctx)) {
563,7 → 472,13
struct intel_context *new_context,
u32 hw_flags)
{
int ret;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915_semaphore_is_enabled(ring->dev) ?
hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
0;
int len, i, ret;
 
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
576,23 → 491,40
return ret;
}
 
ret = intel_ring_begin(ring, 6);
/* These flags are for resource streamer on HSW+ */
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
len = 4;
if (INTEL_INFO(ring->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
ret = intel_ring_begin(ring, len);
if (ret)
return ret;
 
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
intel_ring_emit(ring, MI_NOOP);
if (num_rings) {
struct intel_engine_cs *signaller;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
 
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
hw_flags);
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
599,10 → 531,21
*/
intel_ring_emit(ring, MI_NOOP);
 
if (INTEL_INFO(ring->dev)->gen >= 7)
if (INTEL_INFO(ring->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
for_each_ring(signaller, to_i915(ring->dev), i) {
if (signaller == ring)
continue;
 
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
else
intel_ring_emit(ring, MI_NOOP);
}
 
intel_ring_advance(ring);
 
614,9 → 557,9
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_context *from = ring->last_context;
struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
u32 hw_flags = 0;
bool uninitialized = false;
struct i915_vma *vma;
int ret, i;
 
if (from != NULL && ring == &dev_priv->ring[RCS]) {
642,8 → 585,9
*/
from = ring->last_context;
 
if (USES_FULL_PPGTT(ring->dev)) {
ret = ppgtt->switch_mm(ppgtt, ring, false);
if (to->ppgtt) {
trace_switch_mm(ring, to);
ret = to->ppgtt->switch_mm(to->ppgtt, ring);
if (ret)
goto unpin_out;
}
666,11 → 610,10
if (ret)
goto unpin_out;
 
if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
if (!(vma->bound & GLOBAL_BIND))
vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
GLOBAL_BIND);
 
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
723,6 → 666,12
ring->last_context = to;
 
if (uninitialized) {
if (ring->init_context) {
ret = ring->init_context(ring, to);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
 
ret = i915_gem_render_state_init(ring);
if (ret)
DRM_ERROR("init render state: %d\n", ret);
743,8 → 692,12
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
* it will have a refoucnt > 1. This allows us to destroy the context abstract
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*
* This function should not be used in execlists mode. Instead the context is
* switched by writing to the ELSP and requests keep a reference to their
* context.
*/
int i915_switch_context(struct intel_engine_cs *ring,
struct intel_context *to)
751,6 → 704,7
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
766,9 → 720,9
return do_switch(ring, to);
}
 
static bool hw_context_enabled(struct drm_device *dev)
static bool contexts_enabled(struct drm_device *dev)
{
return to_i915(dev)->hw_context_size;
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
 
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
779,7 → 733,7
struct intel_context *ctx;
int ret;
 
if (!hw_context_enabled(dev))
if (!contexts_enabled(dev))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
786,7 → 740,7
if (ret)
return ret;
 
ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
ctx = i915_gem_create_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
/drivers/video/drm/i915/i915_gem_evict.c
243,7 → 243,7
i915_gem_evict_everything(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm;
struct i915_address_space *vm, *v;
bool lists_empty = true;
int ret;
 
270,7 → 270,7
i915_gem_retire_requests(dev);
 
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry(vm, &dev_priv->vm_list, global_link)
list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
WARN_ON(i915_gem_evict_vm(vm, false));
 
return 0;
/drivers/video/drm/i915/i915_gem_execbuffer.c
35,6 → 35,7
 
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
 
#define BATCH_OFFSET_BIAS (256*1024)
101,7 → 102,6
struct i915_address_space *vm,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
struct drm_i915_gem_object *obj;
struct list_head objects;
int i, ret;
136,21 → 136,7
i = 0;
while (!list_empty(&objects)) {
struct i915_vma *vma;
struct i915_address_space *bind_vm = vm;
 
if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
USES_FULL_PPGTT(vm->dev)) {
ret = -EINVAL;
goto err;
}
 
/* If we have secure dispatch, or the userspace assures us that
* they know what they're doing, use the GGTT VM.
*/
if (((args->flags & I915_EXEC_SECURE) &&
(i == (args->buffer_count - 1))))
bind_vm = &dev_priv->gtt.base;
 
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
163,7 → 149,7
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
276,7 → 262,6
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t page_offset = offset_in_page(reloc->offset);
uint64_t delta = reloc->delta + target_offset;
char *vaddr;
286,8 → 271,8
if (ret)
return ret;
 
vaddr = (char*)dev_priv->gtt.mappable+4096;
MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,reloc->offset >> PAGE_SHIFT), PG_SW);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
reloc->offset >> PAGE_SHIFT));
*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
 
if (INTEL_INFO(dev)->gen >= 8) {
294,13 → 279,16
page_offset = offset_in_page(page_offset + sizeof(uint32_t));
 
if (page_offset == 0) {
MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT), PG_SW);
kunmap_atomic(vaddr);
vaddr = kmap_atomic(i915_gem_object_get_page(obj,
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
}
 
*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
}
 
kunmap_atomic(vaddr);
 
return 0;
}
 
312,7 → 300,7
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t delta = reloc->delta + target_offset;
uint32_t __iomem *reloc_entry;
uint64_t offset;
void __iomem *reloc_page;
int ret;
 
325,15 → 313,15
return ret;
 
/* Map the page containing the relocation we're going to perform. */
reloc->offset += i915_gem_obj_ggtt_offset(obj);
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
(reloc->offset & PAGE_MASK), PG_SW);
(offset & PAGE_MASK), PG_SW);
reloc_page = dev_priv->gtt.mappable;
reloc_entry = (uint32_t __iomem *)
(reloc_page + offset_in_page(reloc->offset));
iowrite32(lower_32_bits(delta), reloc_entry);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
 
 
return 0;
}
 
363,12 → 351,9
* through the ppgtt for non_secure batchbuffers. */
if (unlikely(IS_GEN6(dev) &&
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
!target_i915_obj->has_global_gtt_mapping)) {
struct i915_vma *vma =
list_first_entry(&target_i915_obj->vma_list,
typeof(*vma), vma_link);
vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
}
!(target_vma->bound & GLOBAL_BIND)))
target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
GLOBAL_BIND);
 
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
523,14 → 508,6
}
 
static int
need_reloc_mappable(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
i915_is_ggtt(vma->vm);
}
 
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct intel_engine_cs *ring,
bool *need_reloc)
537,20 → 514,12
{
struct drm_i915_gem_object *obj = vma->obj;
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence;
uint64_t flags;
int ret;
 
flags = 0;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
if (need_fence || need_reloc_mappable(vma))
flags |= PIN_MAPPABLE;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
flags |= PIN_GLOBAL | PIN_MAPPABLE;
if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
562,7 → 531,6
 
entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
if (has_fenced_gpu_access) {
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
ret = i915_gem_object_get_fence(obj);
if (ret)
570,10 → 538,7
 
if (i915_gem_object_pin_fence(obj))
entry->flags |= __EXEC_OBJECT_HAS_FENCE;
 
obj->pending_fenced_gpu_access = true;
}
}
 
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start;
589,19 → 554,33
}
 
static bool
eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
need_reloc_mappable(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 
if (entry->relocation_count == 0)
return false;
 
if (!i915_is_ggtt(vma->vm))
return false;
 
/* See also use_cpu_reloc() */
if (HAS_LLC(vma->obj->base.dev))
return false;
 
if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return false;
 
return true;
}
 
static bool
eb_vma_misplaced(struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
bool need_fence, need_mappable;
 
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
 
WARN_ON((need_mappable || need_fence) &&
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_is_ggtt(vma->vm));
 
if (entry->alignment &&
608,7 → 587,7
vma->node.start & (entry->alignment - 1))
return true;
 
if (need_mappable && !obj->map_and_fenceable)
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
return true;
 
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
630,9 → 609,6
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
int retry;
 
if (list_empty(vmas))
return 0;
 
i915_gem_retire_requests_ring(ring);
 
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
646,20 → 622,21
obj = vma->obj;
entry = vma->exec_entry;
 
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);
 
if (need_mappable)
if (need_mappable) {
entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
list_move(&vma->exec_list, &ordered_vmas);
else
} else
list_move_tail(&vma->exec_list, &ordered_vmas);
 
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
list_splice(&ordered_vmas, vmas);
 
684,7 → 661,7
if (!drm_mm_node_allocated(&vma->node))
continue;
 
if (eb_vma_misplaced(vma, has_fenced_gpu_access))
if (eb_vma_misplaced(vma))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
732,9 → 709,6
int i, total, ret;
unsigned count = args->buffer_count;
 
if (WARN_ON(list_empty(&eb->vmas)))
return 0;
 
vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
/* We may process another execbuffer during the unlock... */
878,18 → 852,24
}
 
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
validate_exec_list(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
int count)
{
int i;
unsigned relocs_total = 0;
unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
unsigned invalid_flags;
int i;
 
invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(dev))
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
for (i = 0; i < count; i++) {
char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
 
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
if (exec[i].flags & invalid_flags)
return -EINVAL;
 
/* First check for malicious input causing overflow in
932,16 → 912,26
return ERR_PTR(-EIO);
}
 
if (i915.enable_execlists && !ctx->engine[ring->id].state) {
int ret = intel_lr_context_deferred_create(ctx, ring);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
}
}
 
return ctx;
}
 
static void
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct intel_engine_cs *ring)
{
u32 seqno = intel_ring_get_seqno(ring);
struct i915_vma *vma;
 
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
950,12 → 940,11
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
i915_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
obj->last_write_seqno = seqno;
 
intel_fb_obj_invalidate(obj, ring);
 
962,12 → 951,20
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
obj->last_fenced_seqno = seqno;
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
struct drm_i915_private *dev_priv = to_i915(ring->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
}
 
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
 
static void
void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
struct drm_file *file,
struct intel_engine_cs *ring,
1008,7 → 1005,48
}
 
static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
i915_emit_box(struct intel_engine_cs *ring,
struct drm_clip_rect *box,
int DR1, int DR4)
{
int ret;
 
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
 
if (INTEL_INFO(ring->dev)->gen >= 4) {
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
} else {
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
intel_ring_emit(ring, DR1);
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
intel_ring_emit(ring, DR4);
intel_ring_emit(ring, 0);
}
intel_ring_advance(ring);
 
return 0;
}
 
 
int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
1135,7 → 1173,7
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
ret = i915_emit_box(ring, &cliprects[i],
args->DR1, args->DR4);
if (ret)
goto error;
1235,7 → 1273,7
if (!i915_gem_check_execbuffer(args))
return -EINVAL;
 
ret = validate_exec_list(exec, args->buffer_count);
ret = validate_exec_list(dev, exec, args->buffer_count);
if (ret)
return ret;
 
1282,12 → 1320,6
if (ret)
goto pre_mutex_err;
 
if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
}
 
ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
1297,8 → 1329,9
 
i915_gem_context_reference(ctx);
 
vm = ctx->vm;
if (!USES_FULL_PPGTT(dev))
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
vm = &dev_priv->gtt.base;
 
eb = eb_create(args);
1365,25 → 1398,36
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
if (flags & I915_DISPATCH_SECURE &&
!batch_obj->has_global_gtt_mapping) {
/* When we have multiple VMs, we'll need to make sure that we
* allocate space first */
struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
BUG_ON(!vma);
vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
}
if (flags & I915_DISPATCH_SECURE) {
/*
* So at first glance it looks freaky that we pin the batch here
* outside of the reservation loop. But:
* - The batch is already pinned into the relevant ppgtt, so we
* already have the backing storage fully allocated.
* - No other BO uses the global gtt (well contexts, but meh),
* so we don't really have issues with multiple objects not
* fitting due to fragmentation.
* So this is actually safe.
*/
ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
if (ret)
goto err;
 
if (flags & I915_DISPATCH_SECURE)
exec_start += i915_gem_obj_ggtt_offset(batch_obj);
else
} else
exec_start += i915_gem_obj_offset(batch_obj, vm);
 
ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
&eb->vmas, batch_obj, exec_start, flags);
 
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
* batch vma for correctness. For less ugly and less fragility this
* needs to be adjusted to also track the ggtt batch vma properly as
* active.
*/
if (flags & I915_DISPATCH_SECURE)
i915_gem_object_ggtt_unpin(batch_obj);
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
/drivers/video/drm/i915/i915_gem_gtt.c
23,13 → 23,7
*
*/
 
 
#define AGP_NORMAL_MEMORY 0
 
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
36,29 → 30,33
#include "i915_trace.h"
#include "intel_drv.h"
 
#include <asm/cacheflush.h>
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
bool intel_enable_ppgtt(struct drm_device *dev, bool full)
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
if (i915.enable_ppgtt == 0)
return false;
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
 
if (i915.enable_ppgtt == 1 && full)
return false;
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
if (IS_GEN8(dev))
has_full_ppgtt = false; /* XXX why? */
 
return true;
}
 
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
*/
if (INTEL_INFO(dev)->gen < 9 &&
(enable_ppgtt == 0 || !has_aliasing_ppgtt))
return 0;
 
if (enable_ppgtt == 1)
return 1;
 
if (enable_ppgtt == 2 && HAS_PPGTT(dev))
if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
 
#ifdef CONFIG_INTEL_IOMMU
76,7 → 74,7
return 0;
}
 
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
return has_aliasing_ppgtt ? 1 : 0;
}
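/*
 * Illustrative summary of the sanitized value (a sketch based on the code in
 * this file, not part of the patch): 0 leaves only the global GTT, 1 keeps a
 * single aliasing PPGTT shared behind GGTT bindings (set up later in
 * i915_gem_setup_global_gtt()), and 2 enables full PPGTT with per-file
 * address spaces created via i915_ppgtt_create().
 */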
 
 
84,7 → 82,6
enum i915_cache_level cache_level,
u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
174,9 → 171,6
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
 
222,19 → 216,12
 
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
uint64_t val, bool synchronous)
uint64_t val)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
 
BUG_ON(entry >= 4);
 
if (synchronous) {
I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
return 0;
}
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
251,8 → 238,7
}
 
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
struct intel_engine_cs *ring)
{
int i, ret;
 
261,7 → 247,7
 
for (i = used_pd - 1; i >= 0; i--) {
dma_addr_t addr = ppgtt->pd_dma_addr[i];
ret = gen8_write_pdp(ring, i, addr, synchronous);
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
}
283,10 → 269,6
unsigned num_entries = length >> PAGE_SHIFT;
unsigned last_pte, i;
 
pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
if(pt_vaddr == NULL)
return;
 
scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
I915_CACHE_LLC, use_scratch);
 
297,7 → 279,7
if (last_pte > GEN8_PTES_PER_PAGE)
last_pte = GEN8_PTES_PER_PAGE;
 
MapPage(pt_vaddr,(addr_t)page_table, PG_SW);
pt_vaddr = kmap_atomic(page_table);
 
for (i = pte; i < last_pte; i++) {
pt_vaddr[i] = scratch_pte;
306,6 → 288,7
 
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
 
pte = 0;
if (++pde == GEN8_PDES_PER_PAGE) {
313,7 → 296,6
pde = 0;
}
}
FreeKernelSpace(pt_vaddr);
}
 
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
329,16 → 311,15
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
struct sg_page_iter sg_iter;
 
pt_vaddr = AllocKernelSpace(4096);
if(pt_vaddr == NULL)
return;
pt_vaddr = NULL;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
 
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
break;
 
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
 
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true);
345,16 → 326,21
if (++pte == GEN8_PTES_PER_PAGE) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
if (++pde == GEN8_PDES_PER_PAGE) {
pdpe++;
pde = 0;
}
pte = 0;
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
}
}
FreeKernelSpace(pt_vaddr);
if (pt_vaddr) {
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
kunmap_atomic(pt_vaddr);
}
}
 
static void gen8_free_page_tables(struct page **pt_pages)
{
409,9 → 395,6
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
list_del(&vm->global_link);
drm_mm_takedown(&vm->mm);
 
gen8_ppgtt_unmap_pages(ppgtt);
gen8_ppgtt_free(ppgtt);
}
576,7 → 559,6
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
int i, j, ret;
gen8_ppgtt_pde_t *pd_vaddr;
 
if (size % (1<<30))
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
609,11 → 591,9
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
* will never need to touch the PDEs again.
*/
 
pd_vaddr = AllocKernelSpace(4096);
 
for (i = 0; i < max_pdp; i++) {
MapPage(pd_vaddr,(addr_t)(&ppgtt->pd_pages[i]), 3);
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
621,10 → 601,9
}
if (!HAS_LLC(ppgtt->base.dev))
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
kunmap_atomic(pd_vaddr);
}
FreeKernelSpace(pd_vaddr);
 
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->switch_mm = gen8_mm_switch;
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
677,29 → 656,10
}
 
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
struct intel_engine_cs *ring)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/* If we're in reset, we can assume the GPU is sufficiently idle to
* manually frob these bits. Ideally we could use the ring functions,
* except our error handling makes it quite difficult (can't use
* intel_ring_begin, ring->flush, or intel_ring_advance)
*
* FIXME: We should try not to special case reset
*/
if (synchronous ||
i915_reset_in_progress(&dev_priv->gpu_error)) {
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_BASE(ring));
return 0;
}
 
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
721,29 → 681,10
}
 
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
struct intel_engine_cs *ring)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/* If we're in reset, we can assume the GPU is sufficiently idle to
* manually frob these bits. Ideally we could use the ring functions,
* except our error handling makes it quite difficult (can't use
* intel_ring_begin, ring->flush, or intel_ring_advance)
*
* FIXME: We should try not to special case reset
*/
if (synchronous ||
i915_reset_in_progress(&dev_priv->gpu_error)) {
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
POSTING_READ(RING_PP_DIR_BASE(ring));
return 0;
}
 
/* NB: TLBs must be flushed and invalidated before a switch */
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
772,14 → 713,11
}
 
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous)
struct intel_engine_cs *ring)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!synchronous)
return 0;
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
789,39 → 727,20
return 0;
}
 
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
int j, ret;
int j;
 
for_each_ring(ring, dev_priv, j) {
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
/* We promise to do a switch later with FULL PPGTT. If this is
* aliasing, this is the one and only switch we'll do */
if (USES_FULL_PPGTT(dev))
continue;
 
ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
goto err_out;
}
 
return 0;
 
err_out:
for_each_ring(ring, dev_priv, j)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
return ret;
}
 
static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t ecochk, ecobits;
840,31 → 759,16
I915_WRITE(GAM_ECOCHK, ecochk);
 
for_each_ring(ring, dev_priv, i) {
int ret;
/* GFX_MODE is per-ring on gen7+ */
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
/* We promise to do a switch later with FULL PPGTT. If this is
* aliasing, this is the one and only switch we'll do */
if (USES_FULL_PPGTT(dev))
continue;
 
ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
return ret;
}
 
return 0;
}
 
static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_enable(struct drm_device *dev)
{
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t ecochk, gab_ctl, ecobits;
int i;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
877,16 → 781,8
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
 
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
for_each_ring(ring, dev_priv, i) {
int ret = ppgtt->switch_mm(ppgtt, ring, true);
if (ret)
return ret;
}
 
return 0;
}
 
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t start,
904,28 → 800,23
 
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
pt_vaddr = AllocKernelSpace(4096);
 
if(pt_vaddr == NULL)
return;
 
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
 
kunmap_atomic(pt_vaddr);
 
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pt++;
};
 
FreeKernelSpace(pt_vaddr);
}
}
 
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
940,25 → 831,24
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
struct sg_page_iter sg_iter;
 
pt_vaddr = AllocKernelSpace(4096);
 
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if(pt_vaddr == NULL)
return;
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
 
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
 
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
cache_level, true, flags);
 
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
act_pt++;
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
act_pte = 0;
}
}
FreeKernelSpace(pt_vaddr);
if (pt_vaddr)
kunmap_atomic(pt_vaddr);
}
 
static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
988,8 → 878,6
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
 
list_del(&vm->global_link);
drm_mm_takedown(&ppgtt->base.mm);
drm_mm_remove_node(&ppgtt->node);
 
gen6_ppgtt_unmap_pages(ppgtt);
1110,13 → 998,10
 
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->enable = gen6_ppgtt_enable;
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = hsw_mm_switch;
} else if (IS_GEN7(dev)) {
ppgtt->enable = gen7_ppgtt_enable;
ppgtt->switch_mm = gen7_mm_switch;
} else
BUG();
1147,40 → 1032,119
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
 
gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
ppgtt->pd_offset << 10);
 
return 0;
}
 
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
 
ppgtt->base.dev = dev;
ppgtt->base.scratch = dev_priv->gtt.base.scratch;
 
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev))
ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
return gen6_ppgtt_init(ppgtt);
else if (IS_GEN8(dev) || IS_GEN9(dev))
return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
else
BUG();
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
 
if (!ret) {
struct drm_i915_private *dev_priv = dev->dev_private;
ret = __hw_ppgtt_init(dev, ppgtt);
if (ret == 0) {
kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
i915_init_vm(dev_priv, &ppgtt->base);
if (INTEL_INFO(dev)->gen < 8) {
gen6_write_pdes(ppgtt);
DRM_DEBUG("Adding PPGTT at offset %x\n",
ppgtt->pd_offset << 10);
}
 
return ret;
}
 
int i915_ppgtt_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i, ret = 0;
 
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
if (i915.enable_execlists)
return 0;
 
if (!USES_PPGTT(dev))
return 0;
 
if (IS_GEN6(dev))
gen6_ppgtt_enable(dev);
else if (IS_GEN7(dev))
gen7_ppgtt_enable(dev);
else if (INTEL_INFO(dev)->gen >= 8)
gen8_ppgtt_enable(dev);
else
WARN_ON(1);
 
if (ppgtt) {
for_each_ring(ring, dev_priv, i) {
ret = ppgtt->switch_mm(ppgtt, ring);
if (ret != 0)
return ret;
}
}
 
return ret;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
struct i915_hw_ppgtt *ppgtt;
int ret;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ERR_PTR(-ENOMEM);
 
ret = i915_ppgtt_init(dev, ppgtt);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
 
ppgtt->file_priv = fpriv;
 
trace_i915_ppgtt_create(&ppgtt->base);
 
return ppgtt;
}
 
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
container_of(kref, struct i915_hw_ppgtt, ref);
 
trace_i915_ppgtt_release(&ppgtt->base);
 
/* vmas should already be unbound */
WARN_ON(!list_empty(&ppgtt->base.active_list));
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
 
list_del(&ppgtt->base.global_link);
drm_mm_takedown(&ppgtt->base.mm);
 
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
}
 
static void
ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
1254,7 → 1218,7
fault_reg = I915_READ(RING_FAULT_REG(ring));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\\n"
"\tAddr: 0x%08lx\n"
"\tAddress space: %s\n"
"\tSource ID: %d\n"
"\tType: %d\n",
1269,6 → 1233,16
POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}
 
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
if (INTEL_INFO(dev_priv->dev)->gen < 6) {
intel_gtt_chipset_flush();
} else {
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
}
 
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1285,6 → 1259,8
dev_priv->gtt.base.start,
dev_priv->gtt.base.total,
true);
 
i915_ggtt_flush(dev_priv);
}
 
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1312,7 → 1288,7
* Unfortunately above, we've just wiped out the mappings
* without telling our object about it. So we need to fake it.
*/
obj->has_global_gtt_mapping = 0;
vma->bound &= ~GLOBAL_BIND;
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
}
 
1337,7 → 1313,7
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
}
 
i915_gem_chipset_flush(dev);
i915_ggtt_flush(dev_priv);
}
 
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
1509,7 → 1485,7
 
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
vma->obj->has_global_gtt_mapping = 1;
vma->bound = GLOBAL_BIND;
}
 
static void i915_ggtt_clear_range(struct i915_address_space *vm,
1528,7 → 1504,7
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
BUG_ON(!i915_is_ggtt(vma->vm));
vma->obj->has_global_gtt_mapping = 0;
vma->bound = 0;
intel_gtt_clear_range(first, size);
}
 
1556,17 → 1532,17
* flags. At all other times, the GPU will use the aliasing PPGTT.
*/
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
if (!obj->has_global_gtt_mapping ||
if (!(vma->bound & GLOBAL_BIND) ||
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
cache_level, flags);
obj->has_global_gtt_mapping = 1;
vma->bound |= GLOBAL_BIND;
}
}
 
if (dev_priv->mm.aliasing_ppgtt &&
(!obj->has_aliasing_ppgtt_mapping ||
(!(vma->bound & LOCAL_BIND) ||
(cache_level != obj->cache_level))) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.insert_entries(&appgtt->base,
1573,7 → 1549,7
vma->obj->pages,
vma->node.start,
cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
vma->bound |= LOCAL_BIND;
}
}
 
1583,21 → 1559,21
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
 
if (obj->has_global_gtt_mapping) {
if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
obj->base.size,
true);
obj->has_global_gtt_mapping = 0;
vma->bound &= ~GLOBAL_BIND;
}
 
if (obj->has_aliasing_ppgtt_mapping) {
if (vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
obj->base.size,
true);
obj->has_aliasing_ppgtt_mapping = 0;
vma->bound &= ~LOCAL_BIND;
}
}
 
1634,7 → 1610,7
}
}
 
void i915_gem_setup_global_gtt(struct drm_device *dev,
static int i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
1653,6 → 1629,7
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
int ret;
 
BUG_ON(mappable_end > end);
 
1664,16 → 1641,18
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
int ret;
 
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
 
WARN_ON(i915_gem_obj_ggtt_bound(obj));
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
if (ret) {
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
vma->bound |= GLOBAL_BIND;
}
 
dev_priv->gtt.base.start = start;
dev_priv->gtt.base.total = end - start;
1688,8 → 1667,24
 
/* And finally clear the reserved guard page */
ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
 
if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return -ENOMEM;
 
ret = __hw_ppgtt_init(dev, ppgtt);
if (ret != 0)
return ret;
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
}
 
return 0;
}
 
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1701,6 → 1696,25
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
 
void i915_global_gtt_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_address_space *vm = &dev_priv->gtt.base;
 
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
ppgtt->base.cleanup(&ppgtt->base);
}
 
if (drm_mm_initialized(&vm->mm)) {
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
 
vm->cleanup(vm);
}
 
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1710,7 → 1724,6
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);
 
#ifdef CONFIG_INTEL_IOMMU
1735,7 → 1748,6
set_pages_wb(page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(page);
__free_page(page);
}
 
1805,6 → 1817,18
return (gmch_ctrl - 0x17 + 9) << 22;
}
 
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
 
if (gen9_gmch_ctl < 0xf0)
return gen9_gmch_ctl << 25; /* 32 MB units */
else
/* 4MB increments starting at 0xf0 for 4MB */
return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
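/*
 * Worked example of the decode above (illustrative only): a masked GMS field
 * of 0x02 reports 0x02 << 25 = 64 MB of stolen memory, while 0xf3 reports
 * (0xf3 - 0xf0 + 1) << 22 = 16 MB, i.e. 4 MB granularity once the field
 * reaches 0xf0.
 */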
 
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
1848,6 → 1872,22
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 
if (!USES_PPGTT(dev_priv->dev))
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
* So let's disable cache for GGTT to avoid screen corruptions.
* MOCS still can be used though.
* - System agent ggtt writes (i.e. cpu gtt mmaps) already work
* before this patch, i.e. the same uncached + snooping access
* like on gen6/7 seems to be in effect.
* - So this just fixes blitter/render access. Again it looks
* like it's not just uncached access, but uncached + snooping.
* So we can still hold onto all our assumptions wrt cpu
* clflushing on LLC machines.
*/
pat = GEN8_PPAT(0, GEN8_PPAT_UC);
 
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
1864,9 → 1904,17
* Only the snoop bit has meaning for CHV, the rest is
* ignored.
*
* Note that the hardware enforces snooping for all page
* table accesses. The snoop bit is actually ignored for
* PDEs.
* The hardware will never snoop for certain types of accesses:
* - CPU GTT (GMADR->GGTT->no snoop->memory)
* - PPGTT page tables
* - some other special cycles
*
* As with BDW, we also need to consider the following for GT accesses:
* "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
1901,7 → 1949,10
 
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
if (IS_CHERRYVIEW(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
*stolen = gen9_get_stolen_size(snb_gmch_ctl);
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev)) {
*stolen = chv_get_stolen_size(snb_gmch_ctl);
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
1969,10 → 2020,6
 
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
if (drm_mm_initialized(&vm->mm)) {
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
}
iounmap(gtt->gsm);
teardown_scratch_page(vm->dev);
}
2077,6 → 2124,7
vma->obj = obj;
 
switch (INTEL_INFO(vm->dev)->gen) {
case 9:
case 8:
case 7:
case 6:
2103,8 → 2151,10
/* Keep GGTT vmas first to make debug easier */
if (i915_is_ggtt(vm))
list_add(&vma->vma_link, &obj->vma_list);
else
else {
list_add_tail(&vma->vma_link, &obj->vma_list);
i915_ppgtt_get(i915_vm_to_ppgtt(vm));
}
 
return vma;
}
/drivers/video/drm/i915/i915_gem_gtt.h
34,6 → 34,8
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
 
struct drm_i915_file_private;
 
typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
121,6 → 123,12
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
 
/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
#define PTE_READ_ONLY (1<<2)
unsigned int bound : 4;
 
/** This object's place on the active/inactive lists */
struct list_head mm_list;
 
153,8 → 161,6
* setting the valid PTE entries to a reserved scratch page. */
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
258,23 → 264,35
dma_addr_t *gen8_pt_dma_addr[4];
};
 
struct intel_context *ctx;
struct drm_i915_file_private *file_priv;
 
int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct intel_engine_cs *ring,
bool synchronous);
struct intel_engine_cs *ring);
// void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
 
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);
 
bool intel_enable_ppgtt(struct drm_device *dev, bool full);
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
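/*
 * Illustrative lifetime sketch (an assumption about typical use, not a new
 * requirement): each VMA created in a full PPGTT takes a reference, and the
 * address space is torn down through i915_ppgtt_release() once the last
 * reference goes away:
 *
 *	i915_ppgtt_get(ppgtt);	// e.g. when a vma is added to the vm
 *	...
 *	i915_ppgtt_put(ppgtt);	// kref_put() ends up in i915_ppgtt_release()
 */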
 
void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
/drivers/video/drm/i915/i915_gem_render_state.c
28,13 → 28,6
#include "i915_drv.h"
#include "intel_renderstate.h"
 
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
};
 
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
45,6 → 38,8
return &gen7_null_state;
case 8:
return &gen8_null_state;
case 9:
return &gen9_null_state;
}
 
return NULL;
113,7 → 108,7
 
d[i++] = s;
}
FreeKernelSpace(d);
kunmap(page);
 
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
127,31 → 122,48
return 0;
}
 
static void render_state_fini(struct render_state *so)
void i915_gem_render_state_fini(struct render_state *so)
{
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
}
 
int i915_gem_render_state_init(struct intel_engine_cs *ring)
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so)
{
struct render_state so;
int ret;
 
if (WARN_ON(ring->id != RCS))
return -ENOENT;
 
ret = render_state_init(&so, ring->dev);
ret = render_state_init(so, ring->dev);
if (ret)
return ret;
 
if (so.rodata == NULL)
if (so->rodata == NULL)
return 0;
 
ret = render_state_setup(&so);
ret = render_state_setup(so);
if (ret) {
i915_gem_render_state_fini(so);
return ret;
}
 
return 0;
}
 
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
struct render_state so;
int ret;
 
ret = i915_gem_render_state_prepare(ring, &so);
if (ret)
goto out;
return ret;
 
if (so.rodata == NULL)
return 0;
 
ret = ring->dispatch_execbuffer(ring,
so.ggtt_offset,
so.rodata->batch_items * 4,
164,6 → 176,6
ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
render_state_fini(&so);
i915_gem_render_state_fini(&so);
return ret;
}
/drivers/video/drm/i915/i915_gem_render_state.h
0,0 → 1,47
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
 
#include <linux/types.h>
 
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
 
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
};
 
int i915_gem_render_state_init(struct intel_engine_cs *ring);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
 
#endif /* _I915_GEM_RENDER_STATE_H_ */
/drivers/video/drm/i915/i915_gem_stolen.c
276,6 → 276,7
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
int bios_reserved = 0;
 
#ifdef CONFIG_INTEL_IOMMU
295,8 → 296,16
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
if (INTEL_INFO(dev)->gen >= 8) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
tmp >>= GEN8_BIOS_RESERVED_SHIFT;
tmp &= GEN8_BIOS_RESERVED_MASK;
bios_reserved = (1024*1024) << tmp;
} else if (IS_GEN7(dev)) {
tmp = I915_READ(GEN7_BIOS_RESERVED);
bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
256*1024 : 1024*1024;
}
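/*
 * Example decode (illustrative): on gen8+ a GEN8_BIOS_RESERVED field value of
 * 2 reserves (1024*1024) << 2 = 4 MB at the top of stolen memory; on gen7 the
 * GEN7_BIOS_RESERVED_256K bit selects either 256 KB or 1 MB.
 */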
 
if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
return 0;
511,7 → 520,7
}
}
 
obj->has_global_gtt_mapping = 1;
vma->bound |= GLOBAL_BIND;
 
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
/drivers/video/drm/i915/i915_gem_tiling.c
91,20 → 91,37
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
if (IS_VALLEYVIEW(dev)) {
if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
/*
* On BDW+, swizzling is not used. We leave the CPU memory
* controller in charge of optimizing memory accesses without
* the extra address manipulation GPU side.
*
* VLV and CHV don't have GPU swizzling.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
if (dev_priv->preserve_bios_swizzle) {
if (I915_READ(DISP_ARB_CTL) &
DISP_TILE_SURFACE_SWIZZLING) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
/* Enable swizzling when the channels are populated
* with identically sized dimms. We don't need to check
* the 3rd channel because no cpu with gpu attached
* ships in that configuration. Also, swizzling only
* makes sense for 2 channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
112,6 → 129,7
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
} else if (IS_GEN5(dev)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
160,6 → 178,15
}
break;
}
 
/* check for L-shaped memory aka modified enhanced addressing */
if (IS_GEN4(dev)) {
uint32_t ddc2 = I915_READ(DCC2);
 
if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
}
 
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
357,26 → 384,22
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
if (obj->map_and_fenceable &&
!i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_ggtt_unbind(obj);
 
obj->map_and_fenceable =
!i915_gem_obj_ggtt_bound(obj) ||
(i915_gem_obj_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
 
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
if (obj->pages &&
obj->madv == I915_MADV_WILLNEED &&
dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (args->tiling_mode == I915_TILING_NONE)
i915_gem_object_unpin_pages(obj);
if (obj->tiling_mode == I915_TILING_NONE)
i915_gem_object_pin_pages(obj);
}
 
if (ret == 0) {
obj->fence_dirty =
obj->fenced_gpu_access ||
obj->last_fenced_seqno ||
obj->fence_reg != I915_FENCE_REG_NONE;
 
obj->tiling_mode = args->tiling_mode;
440,6 → 463,7
}
 
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
/drivers/video/drm/i915/i915_gpu_error.c
208,7 → 208,7
err_puts(m, err->userptr ? " userptr" : "");
err_puts(m, err->ring != -1 ? " " : "");
err_puts(m, ring_str(err->ring));
err_puts(m, i915_cache_level_str(err->cache_level));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
if (err->name)
err_printf(m, " (name: %d)", err->name);
229,6 → 229,8
return "wait";
case HANGCHECK_ACTIVE:
return "active";
case HANGCHECK_ACTIVE_LOOP:
return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
/drivers/video/drm/i915/i915_irq.c
35,7 → 35,13
#include "i915_trace.h"
#include "intel_drv.h"
 
#define assert_spin_locked(a)
/**
* DOC: interrupt handling
*
* These functions provide the basic support for enabling and disabling the
* interrupt handling support. There's a lot more functionality in i915_irq.c
* and related files, but that will be described in separate chapters.
*/
 
static const u32 hpd_ibx[] = {
[HPD_CRT] = SDE_CRT_HOTPLUG,
118,31 → 124,22
 
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
POSTING_READ(GEN8_##type##_IER(which)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
 
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
I915_WRITE(type##IER, (ier_val)); \
POSTING_READ(type##IER); \
POSTING_READ(type##IMR); \
} while (0)
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 
 
#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
 
#define MAX_NOPID ((u32)~0)
 
 
 
/* For display hotplug interrupt */
static void
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
157,12 → 154,12
}
}
 
static void
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
 
if (!intel_irqs_enabled(dev_priv))
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
if ((dev_priv->irq_mask & mask) != mask) {
203,71 → 200,28
ilk_update_gt_irq(dev_priv, mask, 0);
}
 
/**
* snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
uint32_t new_val;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
 
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
POSTING_READ(GEN6_PMIMR);
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
}
 
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
snb_update_pm_irq(dev_priv, mask, mask);
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}
 
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
snb_update_pm_irq(dev_priv, mask, 0);
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
 
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
enum pipe pipe;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->cpu_fifo_underrun_disabled)
return false;
}
 
return true;
}
 
/**
* bdw_update_pm_irq - update GT interrupt 2
* snb_update_pm_irq - update GEN6_PMIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*
* Copied from the snb function, updated with relevant register offsets
*/
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
275,9 → 229,6
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
new_val = dev_priv->pm_irq_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
284,135 → 235,85
 
if (new_val != dev_priv->pm_irq_mask) {
dev_priv->pm_irq_mask = new_val;
I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
POSTING_READ(GEN8_GT_IMR(2));
I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
POSTING_READ(gen6_pm_imr(dev_priv));
}
}
 
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
snb_update_pm_irq(dev_priv, mask, mask);
}
 
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
snb_update_pm_irq(dev_priv, mask, 0);
}
 
static bool cpt_can_enable_serr_int(struct drm_device *dev)
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *crtc;
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->pch_fifo_underrun_disabled)
return false;
__gen6_disable_pm_irq(dev_priv, mask);
}
 
return true;
}
 
void i9xx_check_fifo_underruns(struct drm_device *dev)
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
unsigned long flags;
uint32_t reg = gen6_pm_iir(dev_priv);
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
for_each_intel_crtc(dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
 
if (crtc->cpu_fifo_underrun_disabled)
continue;
 
pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue;
 
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(reg, dev_priv->pm_rps_events);
I915_WRITE(reg, dev_priv->pm_rps_events);
POSTING_READ(reg);
 
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
spin_unlock_irq(&dev_priv->irq_lock);
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
spin_lock_irq(&dev_priv->irq_lock);
 
if (enable) {
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
} else {
if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 
spin_unlock_irq(&dev_priv->irq_lock);
}
}
 
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
 
if (enable)
ironlake_enable_display_irq(dev_priv, bit);
else
ironlake_disable_display_irq(dev_priv, bit);
}
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.interrupts_enabled = false;
spin_unlock_irq(&dev_priv->irq_lock);
 
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
cancel_work_sync(&dev_priv->rps.work);
 
if (!ivb_can_enable_err_int(dev))
return;
spin_lock_irq(&dev_priv->irq_lock);
 
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
 
if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
DRM_ERROR("uncleared fifo underrun on pipe %c\n",
pipe_name(pipe));
}
}
}
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
~dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
 
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->rps.pm_iir = 0;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
421,7 → 322,7
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
437,161 → 338,7
I915_WRITE(SDEIMR, sdeimr);
POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
ibx_display_interrupt_update((dev_priv), (bits), 0)
 
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
else
ibx_disable_display_interrupt(dev_priv, bit);
}
 
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (enable) {
I915_WRITE(SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
 
if (!cpt_can_enable_serr_int(dev))
return;
 
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
}
}
 
/**
* intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pipe: pipe
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable CPU fifo underruns for a specific
* pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
* reporting for one pipe may also disable all the other CPU error interrupts for
* the other pipes, due to the fact that there's just one interrupt mask/enable
* bit for all the pipes.
*
* Returns the previous state of underrun reporting.
*/
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool old;
 
assert_spin_locked(&dev_priv->irq_lock);
 
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
return old;
}
 
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
bool ret;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return ret;
}
 
static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return !intel_crtc->cpu_fifo_underrun_disabled;
}
 
/**
* intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
* @dev: drm device
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
* @enable: true if we want to report FIFO underrun errors, false otherwise
*
* This function makes us disable or enable PCH fifo underruns for a specific
* PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
* underrun reporting for one transcoder may also disable all the other PCH
* error interrupts for the other transcoders, due to the fact that there's just
* one interrupt mask/enable bit for all the transcoders.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool old;
 
/*
* NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
* has only one pch transcoder A that all pipes can use. To avoid racy
* pch transcoder -> pipe lookups from interrupt code simply store the
* underrun statistics in crtc A. Since we never expose this anywhere
* nor use it outside of the fifo underrun code here using the "wrong"
* crtc on LPT won't cause issues.
*/
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
 
if (HAS_PCH_IBX(dev))
ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return old;
}
 
 
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask)
600,6 → 347,7
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
 
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
626,6 → 374,7
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
 
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
 
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
status_mask & ~PIPESTAT_INT_STATUS_MASK,
705,12 → 454,11
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
 
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
if (INTEL_INFO(dev)->gen >= 4)
717,7 → 465,7
i915_enable_pipestat(dev_priv, PIPE_A,
PIPE_LEGACY_BLC_EVENT_STATUS);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/**
1025,7 → 773,7
 
/* In vblank? */
if (in_vbl)
ret |= DRM_SCANOUTPOS_INVBL;
ret |= DRM_SCANOUTPOS_IN_VBLANK;
 
return ret;
}
1109,7 → 857,6
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
unsigned long irqflags;
bool hpd_disabled = false;
bool changed = false;
u32 hpd_event_bits;
1117,7 → 864,7
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
 
hpd_event_bits = dev_priv->hpd_event_bits;
dev_priv->hpd_event_bits = 0;
1146,7 → 893,7
* therefore make sure it's enabled when disabling HPD on
* some connectors */
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
 
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
1269,10 → 1016,10
* @dev_priv: DRM device private
*
*/
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
u8 new_delay, adj;
int new_delay, adj;
 
dev_priv->rps.ei_interrupt_count++;
 
1347,14 → 1094,15
int new_delay, adj;
 
spin_lock_irq(&dev_priv->irq_lock);
/* Speed up work cancelation during disabling rps interrupts. */
if (!dev_priv->rps.interrupts_enabled) {
spin_unlock_irq(&dev_priv->irq_lock);
return;
}
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
if (INTEL_INFO(dev_priv->dev)->gen >= 8)
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
 
/* Make sure we didn't queue anything we're not going to process. */
1435,7 → 1183,6
u32 error_status, row, bank, subbank;
char *parity_event[6];
uint32_t misccpctl;
unsigned long flags;
uint8_t slice = 0;
 
/* We must turn off DOP level clock gating to access the L3 registers.
1480,9 → 1227,9
 
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
spin_unlock_irq(&dev_priv->irq_lock);
 
mutex_unlock(&dev_priv->dev->struct_mutex);
}
1534,32 → 1281,18
 
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
i915_handle_error(dev, false, "GT error interrupt 0x%08x",
gt_iir);
}
GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
}
 
static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
if ((pm_iir & dev_priv->pm_rps_events) == 0)
return;
 
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
1569,12 → 1302,20
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
 
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, ring);
if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
 
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
if (rcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
notify_ring(dev, ring);
if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
1584,12 → 1325,20
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
 
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
 
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
1600,7 → 1349,7
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen8_rps_irq_handler(dev_priv, tmp);
gen6_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
1610,9 → 1359,13
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
 
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
notify_ring(dev, ring);
if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
1623,7 → 1376,7
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
 
static int ilk_port_to_hotplug_shift(enum port port)
static int pch_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
1639,7 → 1392,7
}
}
 
static int g4x_port_to_hotplug_shift(enum port port)
static int i915_port_to_hotplug_shift(enum port port)
{
switch (port) {
case PORT_A:
1697,15 → 1450,17
if (port && dev_priv->hpd_irq_port[port]) {
bool long_hpd;
 
if (IS_G4X(dev)) {
dig_shift = g4x_port_to_hotplug_shift(port);
if (HAS_PCH_SPLIT(dev)) {
dig_shift = pch_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
dig_shift = i915_port_to_hotplug_shift(port);
long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
} else {
dig_shift = ilk_port_to_hotplug_shift(port);
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
 
DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
port_name(port),
long_hpd ? "long" : "short");
/* for long HPD pulses we want to have the digital queue happen,
but we still want HPD storm detection to function. */
if (long_hpd) {
1806,7 → 1561,7
 
if (!pipe_crc->entries) {
spin_unlock(&pipe_crc->lock);
DRM_ERROR("spurious interrupt\n");
DRM_DEBUG_KMS("spurious interrupt\n");
return;
}
 
1892,25 → 1647,39
* the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
/* TODO: RPS on GEN9+ is not supported yet. */
if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"GEN9+: unexpected RPS IRQ\n"))
return;
 
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
if (dev_priv->rps.interrupts_enabled) {
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
spin_unlock(&dev_priv->irq_lock);
}
 
if (INTEL_INFO(dev_priv)->gen >= 8)
return;
 
if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
i915_handle_error(dev_priv->dev, false,
"VEBOX CS error interrupt 0x%08x",
pm_iir);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
}
 
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
if (!drm_handle_vblank(dev, pipe))
return false;
 
return true;
}
 
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1920,7 → 1689,7
int pipe;
 
spin_lock(&dev_priv->irq_lock);
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int reg;
u32 mask, iir_bit = 0;
 
1931,10 → 1700,10
* we need to be careful that we only handle what we want to
* handle.
*/
mask = 0;
if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
mask |= PIPE_FIFO_UNDERRUN_STATUS;
 
/* fifo underruns are filtered in the underrun handler. */
mask = PIPE_FIFO_UNDERRUN_STATUS;
 
switch (pipe) {
case PIPE_A:
iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1965,21 → 1734,14
}
spin_unlock(&dev_priv->irq_lock);
 
for_each_pipe(pipe) {
// if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
// drm_handle_vblank(dev, pipe);
for_each_pipe(dev_priv, pipe) {
 
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
 
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
 
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2134,7 → 1896,7
DRM_ERROR("PCH poison interrupt\n");
 
if (pch_iir & SDE_FDI_MASK)
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
2146,14 → 1908,10
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_ERROR("PCH transcoder A FIFO underrun\n");
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_ERROR("PCH transcoder B FIFO underrun\n");
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
 
static void ivb_err_int_handler(struct drm_device *dev)
2165,13 → 1923,9
if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n");
 
for_each_pipe(pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
if (IS_IVYBRIDGE(dev))
2193,19 → 1947,13
DRM_ERROR("PCH poison interrupt\n");
 
if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
false))
DRM_ERROR("PCH transcoder A FIFO underrun\n");
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
 
if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
false))
DRM_ERROR("PCH transcoder B FIFO underrun\n");
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
 
if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
false))
DRM_ERROR("PCH transcoder C FIFO underrun\n");
intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
 
I915_WRITE(SERR_INT, serr_int);
}
2242,7 → 1990,7
DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
 
if (pch_iir & SDE_FDI_MASK_CPT)
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
2265,14 → 2013,10
if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n");
 
for_each_pipe(pipe) {
// if (de_iir & DE_PIPE_VBLANK(pipe))
// drm_handle_vblank(dev, pipe);
for_each_pipe(dev_priv, pipe) {
 
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
if (de_iir & DE_PIPE_CRC_DONE(pipe))
i9xx_pipe_crc_irq_handler(dev, pipe);
2315,9 → 2059,7
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev);
 
for_each_pipe(pipe) {
// if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
// drm_handle_vblank(dev, pipe);
for_each_pipe(dev_priv, pipe) {
 
/* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2421,7 → 2163,12
irqreturn_t ret = IRQ_NONE;
uint32_t tmp = 0;
enum pipe pipe;
u32 aux_mask = GEN8_AUX_CHANNEL_A;
 
if (IS_GEN9(dev))
aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
 
master_ctl = I915_READ(GEN8_MASTER_IRQ);
master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
if (!master_ctl)
2453,7 → 2200,8
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
if (tmp & GEN8_AUX_CHANNEL_A)
 
if (tmp & aux_mask)
dp_aux_irq_handler(dev);
else
DRM_ERROR("Unexpected DE Port interrupt\n");
2462,8 → 2210,8
DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
 
for_each_pipe(pipe) {
uint32_t pipe_iir;
for_each_pipe(dev_priv, pipe) {
uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
 
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
continue;
2472,29 → 2220,31
if (pipe_iir) {
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
// if (pipe_iir & GEN8_PIPE_VBLANK)
// intel_pipe_handle_vblank(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
// intel_prepare_page_flip(dev, pipe);
// intel_finish_page_flip_plane(dev, pipe);
}
 
if (IS_GEN9(dev))
flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
 
 
if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false))
DRM_ERROR("Pipe %c FIFO underrun\n",
pipe_name(pipe));
}
if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
pipe);
 
if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
 
if (IS_GEN9(dev))
fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
pipe_name(pipe),
pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
}
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
2662,7 → 2412,7
 
if (eir & I915_ERROR_MEMORY_REFRESH) {
pr_err("memory refresh error:\n");
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
2759,55 → 2509,6
schedule_work(&dev_priv->gpu_error.work);
}
 
#if 0
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
 
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
 
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
 
if (work == NULL ||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
 
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->primary->fb->pitches[0] +
crtc->x * crtc->primary->fb->bits_per_pixel/8);
}
 
spin_unlock_irqrestore(&dev->event_lock, flags);
 
if (stall_detected) {
DRM_DEBUG_DRIVER("Pageflip stall detected\n");
intel_prepare_page_flip(dev, intel_crtc->plane);
}
}
 
#endif
 
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
3299,10 → 3000,22
ibx_irq_reset(dev);
}
 
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
GEN5_IRQ_RESET(VLV_);
}
 
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
3310,22 → 3023,11
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
 
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
 
gen5_gt_irq_reset(dev);
 
I915_WRITE(DPINVGTT, 0xff);
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
POSTING_READ(VLV_IER);
vlv_display_irq_reset(dev_priv);
}
 
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3346,8 → 3048,8
 
gen8_gt_irq_reset(dev_priv);
 
for_each_pipe(pipe)
if (intel_display_power_enabled(dev_priv,
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
 
3360,20 → 3062,19
 
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
~dev_priv->de_irq_mask[PIPE_B]);
~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
~dev_priv->de_irq_mask[PIPE_C]);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
3382,37 → 3083,25
 
GEN5_IRQ_RESET(GEN8_PCU_);
 
POSTING_READ(GEN8_PCU_IIR);
 
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
I915_WRITE(VLV_IIR, 0xffffffff);
POSTING_READ(VLV_IIR);
vlv_display_irq_reset(dev_priv);
}
 
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
 
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
3475,8 → 3164,10
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 
if (INTEL_INFO(dev)->gen >= 6) {
pm_irqs |= dev_priv->pm_rps_events;
 
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
3487,7 → 3178,6
 
static int ironlake_irq_postinstall(struct drm_device *dev)
{
unsigned long irqflags;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 display_mask, extra_mask;
 
3526,9 → 3216,9
* spinlocking not required here for correctness since interrupt
* setup is guaranteed to run in single-threaded context. But we
* need it to make the assert_spin_locked happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
return 0;
3538,31 → 3228,34
{
u32 pipestat_mask;
u32 iir_mask;
enum pipe pipe;
 
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
 
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
 
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
 
i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
 
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
 
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
POSTING_READ(VLV_IER);
POSTING_READ(VLV_IMR);
}
 
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3569,14 → 3262,17
{
u32 pipestat_mask;
u32 iir_mask;
enum pipe pipe;
 
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
 
dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
3584,14 → 3280,15
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
 
i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
PIPE_GMBUS_INTERRUPT_STATUS);
i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
 
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
 
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
 
3604,7 → 3301,7
 
dev_priv->display_irqs_enabled = true;
 
if (dev_priv->dev->irq_enabled)
if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_install(dev_priv);
}
 
3617,35 → 3314,37
 
dev_priv->display_irqs_enabled = false;
 
if (dev_priv->dev->irq_enabled)
if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_uninstall(dev_priv);
}
 
static int valleyview_irq_postinstall(struct drm_device *dev)
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
dev_priv->irq_mask = ~0;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
POSTING_READ(VLV_IER);
POSTING_READ(VLV_IMR);
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
static int valleyview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
vlv_display_irq_postinstall(dev_priv);
 
gen5_gt_irq_postinstall(dev);
 
/* ack & enable invalid PTE error interrupts */
3661,46 → 3360,64
 
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
int i;
 
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
0,
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
 
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
 
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
 
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
int pipe;
u32 aux_en = GEN8_AUX_CHANNEL_A;
 
if (IS_GEN9(dev_priv)) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
} else
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
 
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
 
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
 
for_each_pipe(pipe)
if (intel_display_power_enabled(dev_priv,
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
 
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}
 
static int gen8_irq_postinstall(struct drm_device *dev)
3723,34 → 3440,9
static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
unsigned long irqflags;
int pipe;
 
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = ~enable_mask;
vlv_display_irq_postinstall(dev_priv);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
 
gen8_gt_irq_postinstall(dev_priv);
 
I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3769,11 → 3461,23
gen8_irq_reset(dev);
}
 
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
vlv_display_irq_reset(dev_priv);
 
dev_priv->irq_mask = ~0;
}
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
int pipe;
 
if (!dev_priv)
return;
3780,30 → 3484,16
 
I915_WRITE(VLV_MASTER_IER, 0);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
gen5_gt_irq_reset(dev);
 
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
dev_priv->irq_mask = 0;
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
POSTING_READ(VLV_IER);
vlv_display_irq_uninstall(dev_priv);
}
 
static void cherryview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
3811,44 → 3501,11
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
 
#define GEN8_IRQ_FINI_NDX(type, which) \
do { \
I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
I915_WRITE(GEN8_##type##_IER(which), 0); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)
gen8_gt_irq_reset(dev_priv);
 
#define GEN8_IRQ_FINI(type) \
do { \
I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
I915_WRITE(GEN8_##type##_IER, 0); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
POSTING_READ(GEN8_##type##_IIR); \
I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)
GEN5_IRQ_RESET(GEN8_PCU_);
 
GEN8_IRQ_FINI_NDX(GT, 0);
GEN8_IRQ_FINI_NDX(GT, 1);
GEN8_IRQ_FINI_NDX(GT, 2);
GEN8_IRQ_FINI_NDX(GT, 3);
 
GEN8_IRQ_FINI(PCU);
 
#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
I915_WRITE(VLV_IIR, 0xffffffff);
POSTING_READ(VLV_IIR);
vlv_display_irq_uninstall(dev_priv);
}
 
static void ironlake_irq_uninstall(struct drm_device *dev)
3862,13 → 3519,12
}
 
#if 0
 
static void i8xx_irq_preinstall(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff);
I915_WRITE16(IER, 0x0);
3878,7 → 3534,6
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
 
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3901,10 → 3556,10
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
 
return 0;
}
3922,7 → 3577,7
return false;
 
if ((iir & flip_pending) == 0)
return false;
goto check_page_flip;
 
// intel_prepare_page_flip(dev, pipe);
 
3933,11 → 3588,14
* an interrupt per se, we watch for the change at vblank.
*/
if (I915_READ16(ISR) & flip_pending)
return false;
goto check_page_flip;
 
intel_finish_page_flip(dev, pipe);
return true;
 
return true;
check_page_flip:
// intel_check_page_flip(dev, pipe);
return false;
}
 
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3946,7 → 3604,6
struct drm_i915_private *dev_priv = dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3962,13 → 3619,11
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
3978,17 → 3633,15
if (pipe_stats[pipe] & 0x8000ffff)
I915_WRITE(reg, pipe_stats[pipe]);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock(&dev_priv->irq_lock);
 
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
 
i915_update_dri1_breadcrumb(dev);
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
if (HAS_FBC(dev))
plane = !plane;
4000,9 → 3653,9
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
pipe);
}
 
iir = new_iir;
4016,7 → 3669,7
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
/* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4039,7 → 3692,7
}
 
I915_WRITE16(HWSTAM, 0xeffe);
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
4050,7 → 3703,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
unsigned long irqflags;
 
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
4088,10 → 3740,10
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
 
return 0;
}
4109,7 → 3761,7
return false;
 
if ((iir & flip_pending) == 0)
return false;
goto check_page_flip;
 
// intel_prepare_page_flip(dev, plane);
 
4120,11 → 3772,14
* an interrupt per se, we watch for the change at vblank.
*/
if (I915_READ(ISR) & flip_pending)
return false;
goto check_page_flip;
 
intel_finish_page_flip(dev, pipe);
return true;
 
return true;
check_page_flip:
// intel_check_page_flip(dev, pipe);
return false;
}
 
static irqreturn_t i915_irq_handler(int irq, void *arg)
4132,7 → 3787,6
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4148,13 → 3802,11
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
4164,7 → 3816,7
irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock(&dev_priv->irq_lock);
 
if (!irq_received)
break;
4180,7 → 3832,7
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
if (HAS_FBC(dev))
plane = !plane;
4195,9 → 3847,9
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv,
pipe);
}
 
if (blc_event || (iir & I915_ASLE_INTERRUPT))
4222,8 → 3874,6
iir = new_iir;
} while (iir & ~flip_mask);
 
i915_update_dri1_breadcrumb(dev);
 
return ret;
}
 
4238,7 → 3888,7
}
 
I915_WRITE16(HWSTAM, 0xffff);
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
/* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4258,7 → 3908,7
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
4270,7 → 3920,6
struct drm_i915_private *dev_priv = dev->dev_private;
u32 enable_mask;
u32 error_mask;
unsigned long irqflags;
 
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4291,11 → 3940,11
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* Enable some error detection, note the instruction error mask
4327,7 → 3976,6
static void i915_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
 
4338,7 → 3986,7
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
4361,7 → 4009,6
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
int ret = IRQ_NONE, pipe;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4378,13 → 4025,11
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock(&dev_priv->irq_lock);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false,
"Command parser error, iir 0x%08x",
iir);
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
4396,7 → 4041,7
irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock(&dev_priv->irq_lock);
 
if (!irq_received)
break;
4415,7 → 4060,7
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
i915_handle_vblank(dev, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4426,9 → 4071,8
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev, pipe);
 
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}
 
if (blc_event || (iir & I915_ASLE_INTERRUPT))
4455,8 → 4099,6
iir = new_iir;
}
 
i915_update_dri1_breadcrumb(dev);
 
return ret;
}
 
4472,18 → 4114,18
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
 
for_each_pipe(pipe)
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe),
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
 
static void intel_hpd_irq_reenable(struct work_struct *work)
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv),
4490,10 → 4132,11
hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
int i;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
intel_runtime_pm_get(dev_priv);
 
spin_lock_irq(&dev_priv->irq_lock);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
 
4517,32 → 4160,41
}
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
 
intel_runtime_pm_put(dev_priv);
}
 
void intel_irq_init(struct drm_device *dev)
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
*
* This function initializes all the irq support including work items, timers
* and all the vtables. It does not setup the interrupt itself though.
*/
void intel_irq_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
 
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
// INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev))
/* WaGsvRC0ResidenncyMethod:VLV */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
/* Haven't installed the IRQ handler yet */
dev_priv->pm._irqs_disabled = true;
 
if (IS_GEN2(dev)) {
 
if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
} else {
4550,12 → 4202,20
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
}
 
/*
* Opt out of the vblank disable timer on everything except gen2.
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
}
 
if (IS_CHERRYVIEW(dev)) {
if (IS_CHERRYVIEW(dev_priv)) {
dev->driver->irq_handler = cherryview_irq_handler;
dev->driver->irq_preinstall = cherryview_irq_preinstall;
dev->driver->irq_postinstall = cherryview_irq_postinstall;
4563,7 → 4223,7
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_VALLEYVIEW(dev)) {
} else if (IS_VALLEYVIEW(dev_priv)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
4571,7 → 4231,7
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else if (IS_GEN8(dev)) {
} else if (INTEL_INFO(dev_priv)->gen >= 8) {
dev->driver->irq_handler = gen8_irq_handler;
dev->driver->irq_preinstall = gen8_irq_reset;
dev->driver->irq_postinstall = gen8_irq_postinstall;
4588,8 → 4248,8
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else {
if (INTEL_INFO(dev)->gen == 2) {
} else if (INTEL_INFO(dev)->gen == 3) {
if (INTEL_INFO(dev_priv)->gen == 2) {
} else if (INTEL_INFO(dev_priv)->gen == 3) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
4607,12 → 4267,23
}
}
 
void intel_hpd_init(struct drm_device *dev)
/**
* intel_hpd_init - initializes and enables hpd support
* @dev_priv: i915 device instance
*
* This function enables the hotplug support. It requires that interrupts have
* already been enabled with intel_irq_init_hw(). From this point on hotplug and
* poll request can run concurrently to other code, so locking rules must be
* obeyed.
*
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
unsigned long irqflags;
int i;
 
for (i = 1; i < HPD_NUM_PINS; i++) {
4630,37 → 4301,80
 
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy. */
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
*
* This function enables the hardware interrupt handling, but leaves the hotplug
* handling still disabled. It is called after intel_irq_init().
*
* In the driver load and resume code we need working interrupts in a few places
* but don't want to deal with the hassle of concurrent probe and hotplug
* workers. Hence the split into this two-stage approach.
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
* special cases in our ordering checks.
*/
dev_priv->pm.irqs_enabled = true;
 
dev->driver->irq_uninstall(dev);
dev_priv->pm._irqs_disabled = true;
return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
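With this revision interrupt bring-up is split into explicit stages: intel_irq_init() prepares work items, timers and vtables, intel_irq_install() enables the hardware interrupt, and intel_hpd_init() (above) turns hotplug handling on last. A minimal sketch of that ordering on driver load, assuming a simplified load path (the wrapper name and error handling are illustrative, not part of this revision):
 
/* Sketch: staged interrupt bring-up on driver load (ordering taken from the
 * kernel-doc comments above; i915_irq_load_sketch is a hypothetical helper). */
static int i915_irq_load_sketch(struct drm_i915_private *dev_priv)
{
	int ret;
 
	/* Stage 1: work items, timers and vtables only; no IRQ handler yet. */
	intel_irq_init(dev_priv);
 
	/* Stage 2: register and enable the hardware interrupt. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;
 
	/* Hotplug support is enabled only once interrupts are live. */
	intel_hpd_init(dev_priv);
 
	return 0;
}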
 
/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
/**
* intel_irq_uninstall - finalizes all irq handling
* @dev_priv: i915 device instance
*
* This stops interrupt and hotplug handling and unregisters and frees all
* resources acquired in the init functions.
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
// drm_irq_uninstall(dev_priv->dev);
// intel_hpd_cancel_work(dev_priv);
dev_priv->pm.irqs_enabled = false;
}
 
dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
/**
* intel_runtime_pm_disable_interrupts - runtime interrupt disabling
* @dev_priv: i915 device instance
*
* This function is used to disable interrupts at runtime, both in the runtime
* pm and the system suspend/resume code.
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
dev_priv->pm.irqs_enabled = false;
}
 
/**
* intel_runtime_pm_enable_interrupts - runtime interrupt enabling
* @dev_priv: i915 device instance
*
* This function is used to enable interrupts at runtime, both in the runtime
* pm and the system suspend/resume code.
*/
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->pm.irqs_enabled = true;
dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
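The two runtime-pm helpers above are intended to bracket a powered-down period: disable before suspending the device, enable after resuming it. A hedged sketch of how a runtime suspend/resume path might pair them (the hook names are assumptions; only the two intel_runtime_pm_* calls come from this revision):
 
/* Sketch: pairing the runtime-pm interrupt helpers (hook names hypothetical). */
static int i915_runtime_suspend_sketch(struct drm_i915_private *dev_priv)
{
	/* Quiesce the hardware interrupt before powering the device down. */
	intel_runtime_pm_disable_interrupts(dev_priv);
	/* ... power down ... */
	return 0;
}
 
static int i915_runtime_resume_sketch(struct drm_i915_private *dev_priv)
{
	/* ... power up ... */
	/* Re-run the preinstall/postinstall hooks to restore interrupt state. */
	intel_runtime_pm_enable_interrupts(dev_priv);
	return 0;
}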
 
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
// printf("i915 irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
/drivers/video/drm/i915/i915_params.c
35,6 → 35,7
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_fbc = -1,
.enable_execlists = 0,
.enable_hangcheck = true,
.enable_ppgtt = 1,
.enable_psr = 0,
66,12 → 67,12
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
module_param_named(semaphores, i915.semaphores, int, 0400);
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
 
module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
79,7 → 80,7
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
113,11 → 114,16
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
 
module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
 
module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. "
"(-1=auto, 0=disabled [default], 1=enabled)");
 
module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
 
/drivers/video/drm/i915/i915_reg.h
26,15 → 26,26
#define _I915_REG_H_
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
(pipe) == PIPE_B ? (b) : (c))
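The _PIPE()/_PORT() helpers above derive a per-instance register address by linear interpolation from the A and B instances. A worked example using the transcoder offsets defined later in this header (0x60000 for A, 0x61000 for B):
 
/* _PIPE(pipe, a, b) = (a) + (pipe)*((b)-(a)); with a = 0x60000, b = 0x61000:
 *   _PIPE(PIPE_A, a, b) = 0x60000 + 0 * 0x1000 = 0x60000
 *   _PIPE(PIPE_B, a, b) = 0x60000 + 1 * 0x1000 = 0x61000
 *   _PIPE(PIPE_C, a, b) = 0x60000 + 2 * 0x1000 = 0x62000
 * i.e. each instance is assumed to sit at a constant stride from instance A;
 * _PIPE3() exists for the cases where that assumption does not hold. */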
 
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
if (__builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
BUILD_BUG_ON_MSG((value) & ~(mask), \
"Incorrect value for mask"); \
(mask) << 16 | (value); })
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
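The masked-write macros above place the affected bits in the upper 16 bits of the value, which the hardware treats as a per-bit write enable, so an update touches only the named bits. A short worked example (pure arithmetic, no particular register implied):
 
/* _MASKED_FIELD(mask, value) = (mask) << 16 | (value); with bit 5 (1 << 5):
 *   _MASKED_BIT_ENABLE(1 << 5)  = _MASKED_FIELD(0x20, 0x20) = 0x00200020
 *   _MASKED_BIT_DISABLE(1 << 5) = _MASKED_FIELD(0x20, 0)    = 0x00200000
 * In both cases only bit 5 of the register is written; all other bits keep
 * their current value because their mask bits (31:16) are zero. */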
 
 
 
/* PCI config space */
 
#define HPLLCC 0xc0 /* 855 only */
74,15 → 85,17
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
 
/* Graphics reset regs */
#define I965_GDRST 0xc0 /* PCI config register */
#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
 
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
143,6 → 156,14
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
#define GEN7_BIOS_RESERVED 0x1082C0
#define GEN7_BIOS_RESERVED_1M (0 << 5)
#define GEN7_BIOS_RESERVED_256K (1 << 5)
#define GEN8_BIOS_RESERVED_SHIFT 7
#define GEN7_BIOS_RESERVED_MASK 0x1
#define GEN8_BIOS_RESERVED_MASK 0x3
 
 
/* VGA stuff */
 
#define VGA_ST01_MDA 0x3ba
240,6 → 261,16
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
/* SKL ones */
#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
272,6 → 303,7
#define MI_SEMAPHORE_POLL (1<<15)
#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
282,6 → 314,7
* address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1)
#define MI_LRI_FORCE_POSTED (1<<12)
#define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*(x)-1)
#define MI_STORE_REGISTER_MEM_GEN8(x) MI_INSTR(0x24, 3*(x)-1)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
304,6 → 337,8
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
 
#define MI_PREDICATE_SRC0 (0x2400)
#define MI_PREDICATE_SRC1 (0x2408)
 
#define MI_PREDICATE_RESULT_2 (0x2214)
#define LOWER_SLICE_ENABLED (1<<0)
360,6 → 395,7
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
501,10 → 537,26
#define BUNIT_REG_BISOC 0x11
 
#define PUNIT_REG_DSPFREQ 0x36
#define DSPFREQSTAT_SHIFT_CHV 24
#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
#define DSPFREQGUAR_SHIFT_CHV 8
#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV)
#define DSPFREQSTAT_SHIFT 30
#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT)
#define DSPFREQGUAR_SHIFT 14
#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT)
#define _DP_SSC(val, pipe) ((val) << (2 * (pipe)))
#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe))
#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe))
#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe))
#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe))
#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe))
#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16))
#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe))
#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe))
#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe))
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
 
/* See the PUNIT HAS v0.8 for the below bits */
enum punit_power_well {
518,6 → 570,11
PUNIT_POWER_WELL_DPIO_TX_C_LANES_23 = 9,
PUNIT_POWER_WELL_DPIO_RX0 = 10,
PUNIT_POWER_WELL_DPIO_RX1 = 11,
PUNIT_POWER_WELL_DPIO_CMN_D = 12,
/* FIXME: guesswork below */
PUNIT_POWER_WELL_DPIO_TX_D_LANES_01 = 13,
PUNIT_POWER_WELL_DPIO_TX_D_LANES_23 = 14,
PUNIT_POWER_WELL_DPIO_RX2 = 15,
 
PUNIT_POWER_WELL_NUM,
};
533,6 → 590,7
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GPLLENABLE (1<<4)
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_REG_CZ_TIMESTAMP 0xce
641,7 → 699,7
* need to be accessed during AUX communication,
*
* Generally the common lane corresponds to the pipe and
* the spline (PCS/TX) correponds to the port.
* the spline (PCS/TX) corresponds to the port.
*
* For dual channel PHY (VLV/CHV):
*
765,6 → 823,8
#define _VLV_PCS_DW0_CH1 0x8400
#define DPIO_PCS_TX_LANE2_RESET (1<<16)
#define DPIO_PCS_TX_LANE1_RESET (1<<7)
#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
 
#define _VLV_PCS01_DW0_CH0 0x200
805,12 → 865,31
 
#define _VLV_PCS_DW9_CH0 0x8224
#define _VLV_PCS_DW9_CH1 0x8424
#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
#define DPIO_PCS_TX2MARGIN_000 (0<<13)
#define DPIO_PCS_TX2MARGIN_101 (1<<13)
#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
#define DPIO_PCS_TX1MARGIN_000 (0<<10)
#define DPIO_PCS_TX1MARGIN_101 (1<<10)
#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
 
#define _VLV_PCS01_DW9_CH0 0x224
#define _VLV_PCS23_DW9_CH0 0x424
#define _VLV_PCS01_DW9_CH1 0x2624
#define _VLV_PCS23_DW9_CH1 0x2824
#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
 
#define _CHV_PCS_DW10_CH0 0x8228
#define _CHV_PCS_DW10_CH1 0x8428
#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
 
#define _VLV_PCS01_DW10_CH0 0x0228
822,8 → 901,18
 
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
 
#define _VLV_PCS01_DW11_CH0 0x022c
#define _VLV_PCS23_DW11_CH0 0x042c
#define _VLV_PCS01_DW11_CH1 0x262c
#define _VLV_PCS23_DW11_CH1 0x282c
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
 
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
838,8 → 927,8
 
#define _VLV_TX_DW2_CH0 0x8288
#define _VLV_TX_DW2_CH1 0x8488
#define DPIO_SWING_MARGIN_SHIFT 16
#define DPIO_SWING_MARGIN_MASK (0xff << DPIO_SWING_MARGIN_SHIFT)
#define DPIO_SWING_MARGIN000_SHIFT 16
#define DPIO_SWING_MARGIN000_MASK (0xff << DPIO_SWING_MARGIN000_SHIFT)
#define DPIO_UNIQ_TRANS_SCALE_SHIFT 8
#define VLV_TX_DW2(ch) _PORT(ch, _VLV_TX_DW2_CH0, _VLV_TX_DW2_CH1)
 
847,6 → 936,8
#define _VLV_TX_DW3_CH1 0x848c
/* The following bit for CHV phy */
#define DPIO_TX_UNIQ_TRANS_SCALE_EN (1<<27)
#define DPIO_SWING_MARGIN101_SHIFT 16
#define DPIO_SWING_MARGIN101_MASK (0xff << DPIO_SWING_MARGIN101_SHIFT)
#define VLV_TX_DW3(ch) _PORT(ch, _VLV_TX_DW3_CH0, _VLV_TX_DW3_CH1)
 
#define _VLV_TX_DW4_CH0 0x8290
853,6 → 944,8
#define _VLV_TX_DW4_CH1 0x8490
#define DPIO_SWING_DEEMPH9P5_SHIFT 24
#define DPIO_SWING_DEEMPH9P5_MASK (0xff << DPIO_SWING_DEEMPH9P5_SHIFT)
#define DPIO_SWING_DEEMPH6P0_SHIFT 16
#define DPIO_SWING_DEEMPH6P0_MASK (0xff << DPIO_SWING_DEEMPH6P0_SHIFT)
#define VLV_TX_DW4(ch) _PORT(ch, _VLV_TX_DW4_CH0, _VLV_TX_DW4_CH1)
 
#define _VLV_TX3_DW4_CH0 0x690
1003,6 → 1096,13
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024
#define PRB0_BASE (0x2030-0x30)
#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
#define PRB2_BASE (0x2050-0x30) /* gen3 */
#define SRB0_BASE (0x2100-0x30) /* gen2 */
#define SRB1_BASE (0x2110-0x30) /* gen2 */
#define SRB2_BASE (0x2120-0x30) /* 830 */
#define SRB3_BASE (0x2130-0x30) /* 830 */
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
1029,6 → 1129,7
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
1064,6 → 1165,7
#define RING_ACTHD_UDW(base) ((base)+0x5c)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_HWSTAM(base) ((base)+0x98)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
1194,7 → 1296,7
#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16)
#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
 
#define GFX_MODE 0x02520
1248,6 → 1350,10
#define INSTPM_TLB_INVALIDATE (1<<9)
#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define MEM_MODE 0x020cc
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
1354,6 → 1460,7
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
 
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
 
1380,6 → 1487,7
#define GT_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_BSD_USER_INTERRUPT (1 << 12)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
1519,6 → 1627,7
/* Framebuffer compression for Ironlake */
#define ILK_DPFC_CB_BASE 0x43200
#define ILK_DPFC_CONTROL 0x43208
#define FBC_CTL_FALSE_COLOR (1<<10)
/* The bit 28-8 is reserved */
#define DPFC_RESERVED (0x1FFFFF00)
#define ILK_DPFC_RECOMP_CTL 0x4320c
1675,12 → 1784,9
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
#define PHY_COM_LANE_RESET_DEASSERT(phy, val) \
((phy == DPIO_PHY0) ? (val | 1) : (val | 2))
#define PHY_COM_LANE_RESET_ASSERT(phy, val) \
((phy == DPIO_PHY0) ? (val & ~1) : (val & ~2))
#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) ((phy == DPIO_PHY0) ? (1<<31) : (1<<30))
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
 
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
1953,6 → 2059,8
#define DCC_ADDRESSING_MODE_MASK (3 << 0)
#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
#define DCC2 0x10204
#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
 
/* Pineview MCH register contains DDR3 setting */
#define CSHRDDR3CTL 0x101a8
2236,7 → 2344,6
 
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
2397,6 → 2504,7
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
#define _PIPE_MULT_A 0x6002c
 
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
2408,6 → 2516,7
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
#define _PIPE_MULT_B 0x6102c
 
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
2428,6 → 2537,7
#define BCLRPAT(trans) _TRANSCODER2(trans, _BCLRPAT_A)
#define VSYNCSHIFT(trans) _TRANSCODER2(trans, _VSYNCSHIFT_A)
#define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A)
 
/* HSW+ eDP PSR registers */
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
2457,9 → 2567,7
 
#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
#define EDP_PSR_DPCD_COMMAND 0x80060000
#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
3476,6 → 3584,8
#define DP_LINK_TRAIN_OFF (3 << 28)
#define DP_LINK_TRAIN_MASK (3 << 28)
#define DP_LINK_TRAIN_SHIFT 28
#define DP_LINK_TRAIN_PAT_3_CHV (1 << 14)
#define DP_LINK_TRAIN_MASK_CHV ((3 << 28)|(1<<14))
 
/* CPT Link training mode */
#define DP_LINK_TRAIN_PAT_1_CPT (0 << 8)
3594,6 → 3704,7
#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
 
/*
* Computing GMCH M and N values for the Display Port link
3732,7 → 3843,6
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
#define PIPE_DPST_EVENT_STATUS (1UL<<7)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_A_PSR_STATUS_VLV (1UL<<6)
#define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6)
#define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
3842,6 → 3952,7
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
 
/* pnv/gen4/g4x/vlv/chv */
#define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
3849,67 → 3960,145
#define DSPFW_CURSORB_MASK (0x3f<<16)
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW_PLANEB_MASK_VLV (0xff<<8) /* vlv/chv */
#define DSPFW_PLANEA_SHIFT 0
#define DSPFW_PLANEA_MASK (0x7f<<0)
#define DSPFW_PLANEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW2 (dev_priv->info.display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_FBC_SR_EN (1<<31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
#define DSPFW_FBC_SR_MASK (0x7<<28) /* g4x */
#define DSPFW_FBC_HPLL_SR_SHIFT 24
#define DSPFW_FBC_HPLL_SR_MASK (0xf<<24) /* g4x */
#define DSPFW_SPRITEB_SHIFT (16)
#define DSPFW_SPRITEB_MASK (0x7f<<16) /* g4x */
#define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW_CURSORA_MASK (0x3f<<8)
#define DSPFW_PLANEC_SHIFT_OLD 0
#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */
#define DSPFW3 (dev_priv->info.display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
#define DSPFW_CURSOR_SR_MASK (0x3f<<24)
#define DSPFW_HPLL_CURSOR_SHIFT 16
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
#define DSPFW4 (dev_priv->info.display_mmio_offset + 0x70070)
#define DSPFW7 (dev_priv->info.display_mmio_offset + 0x7007c)
#define DSPFW_HPLL_SR_SHIFT 0
#define DSPFW_HPLL_SR_MASK (0x1ff<<0)
 
/* vlv/chv */
#define DSPFW4 (VLV_DISPLAY_BASE + 0x70070)
#define DSPFW_SPRITEB_WM1_SHIFT 16
#define DSPFW_SPRITEB_WM1_MASK (0xff<<16)
#define DSPFW_CURSORA_WM1_SHIFT 8
#define DSPFW_CURSORA_WM1_MASK (0x3f<<8)
#define DSPFW_SPRITEA_WM1_SHIFT 0
#define DSPFW_SPRITEA_WM1_MASK (0xff<<0)
#define DSPFW5 (VLV_DISPLAY_BASE + 0x70074)
#define DSPFW_PLANEB_WM1_SHIFT 24
#define DSPFW_PLANEB_WM1_MASK (0xff<<24)
#define DSPFW_PLANEA_WM1_SHIFT 16
#define DSPFW_PLANEA_WM1_MASK (0xff<<16)
#define DSPFW_CURSORB_WM1_SHIFT 8
#define DSPFW_CURSORB_WM1_MASK (0x3f<<8)
#define DSPFW_CURSOR_SR_WM1_SHIFT 0
#define DSPFW_CURSOR_SR_WM1_MASK (0x3f<<0)
#define DSPFW6 (VLV_DISPLAY_BASE + 0x70078)
#define DSPFW_SR_WM1_SHIFT 0
#define DSPFW_SR_WM1_MASK (0x1ff<<0)
#define DSPFW7 (VLV_DISPLAY_BASE + 0x7007c)
#define DSPFW7_CHV (VLV_DISPLAY_BASE + 0x700b4) /* wtf #1? */
#define DSPFW_SPRITED_WM1_SHIFT 24
#define DSPFW_SPRITED_WM1_MASK (0xff<<24)
#define DSPFW_SPRITED_SHIFT 16
#define DSPFW_SPRITED_MASK (0xff<<16)
#define DSPFW_SPRITEC_WM1_SHIFT 8
#define DSPFW_SPRITEC_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEC_SHIFT 0
#define DSPFW_SPRITEC_MASK (0xff<<0)
#define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8)
#define DSPFW_SPRITEF_WM1_SHIFT 24
#define DSPFW_SPRITEF_WM1_MASK (0xff<<24)
#define DSPFW_SPRITEF_SHIFT 16
#define DSPFW_SPRITEF_MASK (0xff<<16)
#define DSPFW_SPRITEE_WM1_SHIFT 8
#define DSPFW_SPRITEE_WM1_MASK (0xff<<8)
#define DSPFW_SPRITEE_SHIFT 0
#define DSPFW_SPRITEE_MASK (0xff<<0)
#define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? */
#define DSPFW_PLANEC_WM1_SHIFT 24
#define DSPFW_PLANEC_WM1_MASK (0xff<<24)
#define DSPFW_PLANEC_SHIFT 16
#define DSPFW_PLANEC_MASK (0xff<<16)
#define DSPFW_CURSORC_WM1_SHIFT 8
#define DSPFW_CURSORC_WM1_MASK (0x3f<<16)
#define DSPFW_CURSORC_SHIFT 0
#define DSPFW_CURSORC_MASK (0x3f<<0)
 
/* vlv/chv high order bits */
#define DSPHOWM (VLV_DISPLAY_BASE + 0x70064)
#define DSPFW_SR_HI_SHIFT 24
#define DSPFW_SR_HI_MASK (1<<24)
#define DSPFW_SPRITEF_HI_SHIFT 23
#define DSPFW_SPRITEF_HI_MASK (1<<23)
#define DSPFW_SPRITEE_HI_SHIFT 22
#define DSPFW_SPRITEE_HI_MASK (1<<22)
#define DSPFW_PLANEC_HI_SHIFT 21
#define DSPFW_PLANEC_HI_MASK (1<<21)
#define DSPFW_SPRITED_HI_SHIFT 20
#define DSPFW_SPRITED_HI_MASK (1<<20)
#define DSPFW_SPRITEC_HI_SHIFT 16
#define DSPFW_SPRITEC_HI_MASK (1<<16)
#define DSPFW_PLANEB_HI_SHIFT 12
#define DSPFW_PLANEB_HI_MASK (1<<12)
#define DSPFW_SPRITEB_HI_SHIFT 8
#define DSPFW_SPRITEB_HI_MASK (1<<8)
#define DSPFW_SPRITEA_HI_SHIFT 4
#define DSPFW_SPRITEA_HI_MASK (1<<4)
#define DSPFW_PLANEA_HI_SHIFT 0
#define DSPFW_PLANEA_HI_MASK (1<<0)
#define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068)
#define DSPFW_SR_WM1_HI_SHIFT 24
#define DSPFW_SR_WM1_HI_MASK (1<<24)
#define DSPFW_SPRITEF_WM1_HI_SHIFT 23
#define DSPFW_SPRITEF_WM1_HI_MASK (1<<23)
#define DSPFW_SPRITEE_WM1_HI_SHIFT 22
#define DSPFW_SPRITEE_WM1_HI_MASK (1<<22)
#define DSPFW_PLANEC_WM1_HI_SHIFT 21
#define DSPFW_PLANEC_WM1_HI_MASK (1<<21)
#define DSPFW_SPRITED_WM1_HI_SHIFT 20
#define DSPFW_SPRITED_WM1_HI_MASK (1<<20)
#define DSPFW_SPRITEC_WM1_HI_SHIFT 16
#define DSPFW_SPRITEC_WM1_HI_MASK (1<<16)
#define DSPFW_PLANEB_WM1_HI_SHIFT 12
#define DSPFW_PLANEB_WM1_HI_MASK (1<<12)
#define DSPFW_SPRITEB_WM1_HI_SHIFT 8
#define DSPFW_SPRITEB_WM1_HI_MASK (1<<8)
#define DSPFW_SPRITEA_WM1_HI_SHIFT 4
#define DSPFW_SPRITEA_WM1_HI_MASK (1<<4)
#define DSPFW_PLANEA_WM1_HI_SHIFT 0
#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
 
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_16 16
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_64 (1<<31)
#define DDL_CURSORA_PRECISION_32 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_SPRITEB_PRECISION_64 (1<<23)
#define DDL_SPRITEB_PRECISION_32 (0<<23)
#define DDL_SPRITEB_SHIFT 16
#define DDL_SPRITEA_PRECISION_64 (1<<15)
#define DDL_SPRITEA_PRECISION_32 (0<<15)
#define DDL_SPRITEA_SHIFT 8
#define DDL_PLANEA_PRECISION_64 (1<<7)
#define DDL_PLANEA_PRECISION_32 (0<<7)
#define DDL_PLANEA_SHIFT 0
#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
#define DDL_CURSOR_PRECISION_HIGH (1<<31)
#define DDL_CURSOR_PRECISION_LOW (0<<31)
#define DDL_CURSOR_SHIFT 24
#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
#define DDL_PLANE_PRECISION_HIGH (1<<7)
#define DDL_PLANE_PRECISION_LOW (0<<7)
#define DDL_PLANE_SHIFT 0
#define DRAIN_LATENCY_MASK 0x7f
 
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_64 (1<<31)
#define DDL_CURSORB_PRECISION_32 (0<<31)
#define DDL_CURSORB_SHIFT 24
#define DDL_SPRITED_PRECISION_64 (1<<23)
#define DDL_SPRITED_PRECISION_32 (0<<23)
#define DDL_SPRITED_SHIFT 16
#define DDL_SPRITEC_PRECISION_64 (1<<15)
#define DDL_SPRITEC_PRECISION_32 (0<<15)
#define DDL_SPRITEC_SHIFT 8
#define DDL_PLANEB_PRECISION_64 (1<<7)
#define DDL_PLANEB_PRECISION_32 (0<<7)
#define DDL_PLANEB_SHIFT 0
 
#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
#define DDL_CURSORC_PRECISION_64 (1<<31)
#define DDL_CURSORC_PRECISION_32 (0<<31)
#define DDL_CURSORC_SHIFT 24
#define DDL_SPRITEF_PRECISION_64 (1<<23)
#define DDL_SPRITEF_PRECISION_32 (0<<23)
#define DDL_SPRITEF_SHIFT 16
#define DDL_SPRITEE_PRECISION_64 (1<<15)
#define DDL_SPRITEE_PRECISION_32 (0<<15)
#define DDL_SPRITEE_SHIFT 8
#define DDL_PLANEC_PRECISION_64 (1<<7)
#define DDL_PLANEC_PRECISION_32 (0<<7)
#define DDL_PLANEC_SHIFT 0
 
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
3943,6 → 4132,41
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
 
/* Watermark register definitions for SKL */
#define CUR_WM_A_0 0x70140
#define CUR_WM_B_0 0x71140
#define PLANE_WM_1_A_0 0x70240
#define PLANE_WM_1_B_0 0x71240
#define PLANE_WM_2_A_0 0x70340
#define PLANE_WM_2_B_0 0x71340
#define PLANE_WM_TRANS_1_A_0 0x70268
#define PLANE_WM_TRANS_1_B_0 0x71268
#define PLANE_WM_TRANS_2_A_0 0x70368
#define PLANE_WM_TRANS_2_B_0 0x71368
#define CUR_WM_TRANS_A_0 0x70168
#define CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x3ff
 
#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
 
#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
#define _PLANE_WM_BASE(pipe, plane) \
_PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
#define PLANE_WM(pipe, plane, level) \
(_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
#define _PLANE_WM_TRANS_1(pipe) \
_PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
#define _PLANE_WM_TRANS_2(pipe) \
_PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
#define PLANE_WM_TRANS(pipe, plane) \
_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
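 
/*
 * A small standalone sketch (not part of this header) of the addressing
 * arithmetic used by the SKL watermark macros above. It uses simplified
 * stand-ins for the i915 _PIPE()/_PLANE() helpers, assumed to select by
 * index; the real helpers are defined earlier in i915_reg.h, outside this
 * hunk, and behave the same way for indices 0 and 1.
 */
#include <stdio.h>

#define _PIPE(p, a, b)          ((p) == 0 ? (a) : (b))   /* simplified stand-in */
#define _PLANE(pl, a, b)        ((pl) == 0 ? (a) : (b))  /* simplified stand-in */

#define PLANE_WM_1_A_0          0x70240
#define PLANE_WM_1_B_0          0x71240
#define PLANE_WM_2_A_0          0x70340
#define PLANE_WM_2_B_0          0x71340

#define _PLANE_WM_1(pipe)       _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
#define _PLANE_WM_2(pipe)       _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
#define _PLANE_WM_BASE(pipe, plane) \
        _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
#define PLANE_WM(pipe, plane, level) \
        (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))

int main(void)
{
        /* Pipe B (index 1), first plane (index 0), watermark level 2:
         * 0x71240 + 4 * 2 = 0x71248 */
        printf("0x%05x\n", PLANE_WM(1, 0, 2));
        return 0;
}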
 
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
#define WM0_PIPE_PLANE_MASK (0xffff<<16)
4026,7 → 4250,8
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
#define CURSOR_STRIDE_SHIFT 28
#define CURSOR_STRIDE(x) ((ffs(x)-9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */
#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
4048,6 → 4273,7
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define CURSOR_ROTATE_180 (1<<15)
#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
4111,8 → 4337,11
#define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
#define DISPPLANE_ROTATE_180 (1<<15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
4133,6 → 4362,24
#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
 
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
#define CHV_BLEND_LEGACY (0<<30)
#define CHV_BLEND_ANDROID (1<<30)
#define CHV_BLEND_MPO (2<<30)
#define CHV_BLEND_MASK (3<<30)
#define _CHV_CANVAS_A 0x60a04
#define _PRIMPOS_A 0x60a08
#define _PRIMSIZE_A 0x60a0c
#define _PRIMCNSTALPHA_A 0x60a10
#define PRIM_CONST_ALPHA_ENABLE (1<<31)
 
#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
 
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
4195,6 → 4442,7
#define DVS_YUV_ORDER_UYVY (1<<16)
#define DVS_YUV_ORDER_YVYU (2<<16)
#define DVS_YUV_ORDER_VYUY (3<<16)
#define DVS_ROTATE_180 (1<<15)
#define DVS_DEST_KEY (1<<2)
#define DVS_TRICKLE_FEED_DISABLE (1<<14)
#define DVS_TILED (1<<10)
4265,6 → 4513,7
#define SPRITE_YUV_ORDER_UYVY (1<<16)
#define SPRITE_YUV_ORDER_YVYU (2<<16)
#define SPRITE_YUV_ORDER_VYUY (3<<16)
#define SPRITE_ROTATE_180 (1<<15)
#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
#define SPRITE_INT_GAMMA_ENABLE (1<<13)
#define SPRITE_TILED (1<<10)
4332,6 → 4581,7
#define SP_FORMAT_RGBA1010102 (9<<26)
#define SP_FORMAT_RGBX8888 (0xe<<26)
#define SP_FORMAT_RGBA8888 (0xf<<26)
#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
#define SP_SOURCE_KEY (1<<22)
#define SP_YUV_BYTE_ORDER_MASK (3<<16)
#define SP_YUV_ORDER_YUYV (0<<16)
4338,7 → 4588,9
#define SP_YUV_ORDER_UYVY (1<<16)
#define SP_YUV_ORDER_YVYU (2<<16)
#define SP_YUV_ORDER_VYUY (3<<16)
#define SP_ROTATE_180 (1<<15)
#define SP_TILED (1<<10)
#define SP_MIRROR (1<<8) /* CHV pipe B */
#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
4349,6 → 4601,7
#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
#define SP_CONST_ALPHA_ENABLE (1<<31)
#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
 
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
4377,6 → 4630,195
#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
 
/*
* CHV pipe B sprite CSC
*
* |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
* |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
* |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
*/
#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
 
#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
 
#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
 
#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
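 
/*
 * A minimal sketch (not taken from the driver) of how these fields compose:
 * programming a pass-through (identity) CSC on one sprite. Assumptions:
 * 1.0 == 0x1000 in the s3.12 coefficient format, and the C0/C1 fields of
 * each pair register hold the even/odd numbered coefficient respectively.
 * I915_WRITE() is used as elsewhere in the driver. The clamp registers
 * (SPCSC*ICLAMP/SPCSC*OCLAMP) would also need programming; they are left
 * out to keep the sketch short.
 */
static void chv_sprite_csc_identity_sketch(struct drm_i915_private *dev_priv,
                                           int sprite)
{
        const u32 one = 0x1000; /* 1.0 in s3.12 (assumption) */

        /* zero input and output offsets for all three channels */
        I915_WRITE(SPCSCYGOFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));
        I915_WRITE(SPCSCCBOFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));
        I915_WRITE(SPCSCCROFF(sprite), SPCSC_OOFF(0) | SPCSC_IOFF(0));

        /* identity matrix: c0 = c4 = c8 = 1.0, all other coefficients 0 */
        I915_WRITE(SPCSCC01(sprite), SPCSC_C1(0) | SPCSC_C0(one));
        I915_WRITE(SPCSCC23(sprite), SPCSC_C1(0) | SPCSC_C0(0));
        I915_WRITE(SPCSCC45(sprite), SPCSC_C1(0) | SPCSC_C0(one));
        I915_WRITE(SPCSCC67(sprite), SPCSC_C1(0) | SPCSC_C0(0));
        I915_WRITE(SPCSCC8(sprite),  SPCSC_C0(one));
}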
 
/* Skylake plane registers */
 
#define _PLANE_CTL_1_A 0x70180
#define _PLANE_CTL_2_A 0x70280
#define _PLANE_CTL_3_A 0x70380
#define PLANE_CTL_ENABLE (1 << 31)
#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
#define PLANE_CTL_FORMAT_MASK (0xf << 24)
#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
#define PLANE_CTL_TILED_MASK (0x7 << 10)
#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
#define PLANE_CTL_ROTATE_MASK 0x3
#define PLANE_CTL_ROTATE_0 0x0
#define PLANE_CTL_ROTATE_180 0x2
#define _PLANE_STRIDE_1_A 0x70188
#define _PLANE_STRIDE_2_A 0x70288
#define _PLANE_STRIDE_3_A 0x70388
#define _PLANE_POS_1_A 0x7018c
#define _PLANE_POS_2_A 0x7028c
#define _PLANE_POS_3_A 0x7038c
#define _PLANE_SIZE_1_A 0x70190
#define _PLANE_SIZE_2_A 0x70290
#define _PLANE_SIZE_3_A 0x70390
#define _PLANE_SURF_1_A 0x7019c
#define _PLANE_SURF_2_A 0x7029c
#define _PLANE_SURF_3_A 0x7039c
#define _PLANE_OFFSET_1_A 0x701a4
#define _PLANE_OFFSET_2_A 0x702a4
#define _PLANE_OFFSET_3_A 0x703a4
#define _PLANE_KEYVAL_1_A 0x70194
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
 
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
#define _PLANE_CTL_3_B 0x71380
#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
#define PLANE_CTL(pipe, plane) \
_PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
 
#define _PLANE_STRIDE_1_B 0x71188
#define _PLANE_STRIDE_2_B 0x71288
#define _PLANE_STRIDE_3_B 0x71388
#define _PLANE_STRIDE_1(pipe) \
_PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
#define _PLANE_STRIDE_2(pipe) \
_PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
#define _PLANE_STRIDE_3(pipe) \
_PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
#define PLANE_STRIDE(pipe, plane) \
_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
 
#define _PLANE_POS_1_B 0x7118c
#define _PLANE_POS_2_B 0x7128c
#define _PLANE_POS_3_B 0x7138c
#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
#define PLANE_POS(pipe, plane) \
_PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
 
#define _PLANE_SIZE_1_B 0x71190
#define _PLANE_SIZE_2_B 0x71290
#define _PLANE_SIZE_3_B 0x71390
#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
#define PLANE_SIZE(pipe, plane) \
_PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
 
#define _PLANE_SURF_1_B 0x7119c
#define _PLANE_SURF_2_B 0x7129c
#define _PLANE_SURF_3_B 0x7139c
#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
#define PLANE_SURF(pipe, plane) \
_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
 
#define _PLANE_OFFSET_1_B 0x711a4
#define _PLANE_OFFSET_2_B 0x712a4
#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
#define PLANE_OFFSET(pipe, plane) \
_PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
 
#define _PLANE_KEYVAL_1_B 0x71194
#define _PLANE_KEYVAL_2_B 0x71294
#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
#define PLANE_KEYVAL(pipe, plane) \
_PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
 
#define _PLANE_KEYMSK_1_B 0x71198
#define _PLANE_KEYMSK_2_B 0x71298
#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
#define PLANE_KEYMSK(pipe, plane) \
_PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
 
#define _PLANE_KEYMAX_1_B 0x711a0
#define _PLANE_KEYMAX_2_B 0x712a0
#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
#define PLANE_KEYMAX(pipe, plane) \
_PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
 
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
#define _PLANE_BUF_CFG_2(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
#define PLANE_BUF_CFG(pipe, plane) \
_PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
 
/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
 
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
4492,6 → 4934,18
#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
 
#define _PSA_CTL 0x68180
#define _PSB_CTL 0x68980
#define PS_ENABLE (1<<31)
#define _PSA_WIN_SZ 0x68174
#define _PSB_WIN_SZ 0x68974
#define _PSA_WIN_POS 0x68170
#define _PSB_WIN_POS 0x68970
 
#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
 
/* legacy palette */
#define _LGC_PALETTE_A 0x4a000
#define _LGC_PALETTE_B 0x4a800
4613,10 → 5067,23
#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
#define GEN8_PIPE_VSYNC (1 << 1)
#define GEN8_PIPE_VBLANK (1 << 0)
#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN8_PIPE_CURSOR_FAULT | \
GEN8_PIPE_SPRITE_FAULT | \
GEN8_PIPE_PRIMARY_FAULT)
#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
(GEN9_PIPE_CURSOR_FAULT | \
GEN9_PIPE_PLANE3_FAULT | \
GEN9_PIPE_PLANE2_FAULT | \
GEN9_PIPE_PLANE1_FAULT)
 
#define GEN8_DE_PORT_ISR 0x44440
#define GEN8_DE_PORT_IMR 0x44444
4623,6 → 5090,9
#define GEN8_DE_PORT_IIR 0x44448
#define GEN8_DE_PORT_IER 0x4444c
#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
#define GEN9_AUX_CHANNEL_D (1 << 27)
#define GEN9_AUX_CHANNEL_C (1 << 26)
#define GEN9_AUX_CHANNEL_B (1 << 25)
#define GEN8_AUX_CHANNEL_A (1 << 0)
 
#define GEN8_DE_MISC_ISR 0x44460
4706,6 → 5176,8
/* GEN8 chicken */
#define HDC_CHICKEN0 0x7300
#define HDC_FORCE_NON_COHERENT (1<<4)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
5246,8 → 5718,7
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PANEL_PORT_SELECT_DPB_VLV (1 << 30)
#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
 
5407,8 → 5878,13
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MEDIA_GEN9 0xa270
#define FORCEWAKE_RENDER_GEN9 0xa278
#define FORCEWAKE_BLITTER_GEN9 0xa188
#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
5545,12 → 6021,6
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define CHV_CZ_CLOCK_FREQ_MODE_200 200
#define CHV_CZ_CLOCK_FREQ_MODE_267 267
#define CHV_CZ_CLOCK_FREQ_MODE_320 320
#define CHV_CZ_CLOCK_FREQ_MODE_333 333
#define CHV_CZ_CLOCK_FREQ_MODE_400 400
 
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
 
5586,10 → 6056,18
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
 
#define GEN9_PCODE_READ_MEM_LATENCY 0x6
#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
 
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
5626,6 → 6104,9
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
 
#define GEN8_ROW_CHICKEN 0xe4f0
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
5641,6 → 6122,7
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
 
/* Audio */
#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
5649,49 → 6131,49
#define G4X_AUD_CNTL_ST 0x620B4
#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
#define G4X_ELDV_DEVCTG (1 << 14)
#define G4X_ELD_ADDR (0xf << 5)
#define G4X_ELD_ADDR_MASK (0xf << 5)
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
 
#define IBX_HDMIW_HDMIEDID_A 0xE2050
#define IBX_HDMIW_HDMIEDID_B 0xE2150
#define _IBX_HDMIW_HDMIEDID_A 0xE2050
#define _IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
IBX_HDMIW_HDMIEDID_A, \
IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
#define IBX_AUD_CNTL_ST_B 0xE21B4
_IBX_HDMIW_HDMIEDID_A, \
_IBX_HDMIW_HDMIEDID_B)
#define _IBX_AUD_CNTL_ST_A 0xE20B4
#define _IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
IBX_AUD_CNTL_ST_A, \
IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
_IBX_AUD_CNTL_ST_A, \
_IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
#define IBX_AUD_CNTL_ST2 0xE20C0
#define IBX_ELD_VALIDB (1 << 0)
#define IBX_CP_READYB (1 << 1)
#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
 
#define CPT_HDMIW_HDMIEDID_A 0xE5050
#define CPT_HDMIW_HDMIEDID_B 0xE5150
#define _CPT_HDMIW_HDMIEDID_A 0xE5050
#define _CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
CPT_HDMIW_HDMIEDID_A, \
CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
#define CPT_AUD_CNTL_ST_B 0xE51B4
_CPT_HDMIW_HDMIEDID_A, \
_CPT_HDMIW_HDMIEDID_B)
#define _CPT_AUD_CNTL_ST_A 0xE50B4
#define _CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
CPT_AUD_CNTL_ST_A, \
CPT_AUD_CNTL_ST_B)
_CPT_AUD_CNTL_ST_A, \
_CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
 
#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
VLV_HDMIW_HDMIEDID_A, \
VLV_HDMIW_HDMIEDID_B)
#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
_VLV_HDMIW_HDMIEDID_A, \
_VLV_HDMIW_HDMIEDID_B)
#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
VLV_AUD_CNTL_ST_A, \
VLV_AUD_CNTL_ST_B)
_VLV_AUD_CNTL_ST_A, \
_VLV_AUD_CNTL_ST_B)
#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
 
/* These are the 4 32-bit write offset registers for each stream
5700,28 → 6182,28
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
 
#define IBX_AUD_CONFIG_A 0xe2000
#define IBX_AUD_CONFIG_B 0xe2100
#define _IBX_AUD_CONFIG_A 0xe2000
#define _IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
IBX_AUD_CONFIG_A, \
IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
#define CPT_AUD_CONFIG_B 0xe5100
_IBX_AUD_CONFIG_A, \
_IBX_AUD_CONFIG_B)
#define _CPT_AUD_CONFIG_A 0xe5000
#define _CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
CPT_AUD_CONFIG_A, \
CPT_AUD_CONFIG_B)
#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
_CPT_AUD_CONFIG_A, \
_CPT_AUD_CONFIG_B)
#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
VLV_AUD_CONFIG_A, \
VLV_AUD_CONFIG_B)
_VLV_AUD_CONFIG_A, \
_VLV_AUD_CONFIG_B)
 
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
5737,52 → 6219,44
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
 
/* HSW Audio */
#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
#define _HSW_AUD_CONFIG_A 0x65000
#define _HSW_AUD_CONFIG_B 0x65100
#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
HSW_AUD_CONFIG_A, \
HSW_AUD_CONFIG_B)
_HSW_AUD_CONFIG_A, \
_HSW_AUD_CONFIG_B)
 
#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
#define _HSW_AUD_MISC_CTRL_A 0x65010
#define _HSW_AUD_MISC_CTRL_B 0x65110
#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
HSW_AUD_MISC_CTRL_A, \
HSW_AUD_MISC_CTRL_B)
_HSW_AUD_MISC_CTRL_A, \
_HSW_AUD_MISC_CTRL_B)
 
#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
HSW_AUD_DIP_ELD_CTRL_ST_A, \
HSW_AUD_DIP_ELD_CTRL_ST_B)
_HSW_AUD_DIP_ELD_CTRL_ST_A, \
_HSW_AUD_DIP_ELD_CTRL_ST_B)
 
/* Audio Digital Converter */
#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */
#define _HSW_AUD_DIG_CNVT_1 0x65080
#define _HSW_AUD_DIG_CNVT_2 0x65180
#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
HSW_AUD_DIG_CNVT_1, \
HSW_AUD_DIG_CNVT_2)
_HSW_AUD_DIG_CNVT_1, \
_HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
 
#define HSW_AUD_EDID_DATA_A 0x65050
#define HSW_AUD_EDID_DATA_B 0x65150
#define _HSW_AUD_EDID_DATA_A 0x65050
#define _HSW_AUD_EDID_DATA_B 0x65150
#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
HSW_AUD_EDID_DATA_A, \
HSW_AUD_EDID_DATA_B)
_HSW_AUD_EDID_DATA_A, \
_HSW_AUD_EDID_DATA_B)
 
#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
#define AUDIO_INACTIVE_C (1<<11)
#define AUDIO_INACTIVE_B (1<<7)
#define AUDIO_INACTIVE_A (1<<3)
#define AUDIO_OUTPUT_ENABLE_A (1<<2)
#define AUDIO_OUTPUT_ENABLE_B (1<<6)
#define AUDIO_OUTPUT_ENABLE_C (1<<10)
#define AUDIO_ELD_VALID_A (1<<0)
#define AUDIO_ELD_VALID_B (1<<4)
#define AUDIO_ELD_VALID_C (1<<8)
#define AUDIO_CP_READY_A (1<<1)
#define AUDIO_CP_READY_B (1<<5)
#define AUDIO_CP_READY_C (1<<9)
#define HSW_AUD_PIPE_CONV_CFG 0x6507c
#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
 
/* HSW Power Wells */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
5866,15 → 6340,7
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
6008,6 → 6474,83
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
 
/*
* SKL Clocks
*/
 
/* CDCLK_CTL */
#define CDCLK_CTL 0x46000
#define CDCLK_FREQ_SEL_MASK (3<<26)
#define CDCLK_FREQ_450_432 (0<<26)
#define CDCLK_FREQ_540 (1<<26)
#define CDCLK_FREQ_337_308 (2<<26)
#define CDCLK_FREQ_675_617 (3<<26)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
 
/* LCPLL_CTL */
#define LCPLL1_CTL 0x46010
#define LCPLL2_CTL 0x46014
#define LCPLL_PLL_ENABLE (1<<31)
 
/* DPLL control1 */
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
#define DPLL_CRTL1_LINK_RATE_2700 0
#define DPLL_CRTL1_LINK_RATE_1350 1
#define DPLL_CRTL1_LINK_RATE_810 2
#define DPLL_CRTL1_LINK_RATE_1620 3
#define DPLL_CRTL1_LINK_RATE_1080 4
#define DPLL_CRTL1_LINK_RATE_2160 5
 
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15))
#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1))
#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
 
/* DPLL Status */
#define DPLL_STATUS 0x6C060
#define DPLL_LOCK(id) (1<<((id)*8))
 
/* DPLL cfg */
#define DPLL1_CFGCR1 0x6C040
#define DPLL2_CFGCR1 0x6C048
#define DPLL3_CFGCR1 0x6C050
#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
 
#define DPLL1_CFGCR2 0x6C044
#define DPLL2_CFGCR2 0x6C04C
#define DPLL3_CFGCR2 0x6C054
#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
#define DPLL_CFGCR2_KDIV_MASK (3<<5)
#define DPLL_CFGCR2_KDIV(x) (x<<5)
#define DPLL_CFGCR2_KDIV_5 (0<<5)
#define DPLL_CFGCR2_KDIV_2 (1<<5)
#define DPLL_CFGCR2_KDIV_3 (2<<5)
#define DPLL_CFGCR2_KDIV_1 (3<<5)
#define DPLL_CFGCR2_PDIV_MASK (7<<2)
#define DPLL_CFGCR2_PDIV(x) (x<<2)
#define DPLL_CFGCR2_PDIV_1 (0<<2)
#define DPLL_CFGCR2_PDIV_2 (1<<2)
#define DPLL_CFGCR2_PDIV_3 (2<<2)
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
 
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
/drivers/video/drm/i915/i915_trace.h
30,5 → 30,12
#define trace_i915_gem_evict(dev, min_size, alignment, flags)
#define trace_i915_gem_evict_vm(vm)
#define trace_i915_gem_evict_everything(dev)
#define trace_i915_context_free(ctx)
#define trace_i915_context_create(ctx)
#define trace_switch_mm(ring, to)
#define trace_i915_ppgtt_create(base)
#define trace_i915_ppgtt_release(base)
 
 
 
#endif
/drivers/video/drm/i915/intel_audio.c
0,0 → 1,463
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/kernel.h>
 
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
 
/**
* DOC: High Definition Audio over HDMI and Display Port
*
* The graphics and audio drivers together support High Definition Audio over
* HDMI and Display Port. The audio programming sequences are divided into audio
* codec and controller enable and disable sequences. The graphics driver
* handles the audio codec sequences, while the audio driver handles the audio
* controller sequences.
*
* The disable sequences must be performed before disabling the transcoder or
* port. The enable sequences may only be performed after enabling the
* transcoder and port, and after completed link training.
*
* The codec and controller sequences could be done either in parallel or serially,
* but generally the ELDV/PD change in the codec sequence indicates to the audio
* driver that the controller sequence should start. Indeed, most of the
* co-operation between the graphics and audio drivers is handled via audio
* related registers. (The notable exception is the power management, not
* covered here.)
*/
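 
/*
 * A sketch of the ordering described above, for illustration only.
 * enable_transcoder_and_port(), train_link() and
 * disable_transcoder_and_port() are hypothetical placeholders, not driver
 * functions; the real callers are the encoder enable/disable hooks in the
 * port-specific code. Only intel_audio_codec_enable() and
 * intel_audio_codec_disable() below are real.
 */
static void enable_transcoder_and_port(struct intel_encoder *encoder);  /* placeholder */
static void train_link(struct intel_encoder *encoder);                  /* placeholder */
static void disable_transcoder_and_port(struct intel_encoder *encoder); /* placeholder */

static void audio_enable_ordering_sketch(struct intel_encoder *encoder)
{
        enable_transcoder_and_port(encoder);
        train_link(encoder);
        /* codec enable only after the transcoder, port and link are up */
        intel_audio_codec_enable(encoder);
}

static void audio_disable_ordering_sketch(struct intel_encoder *encoder)
{
        /* codec disable must happen before the port/transcoder go down */
        intel_audio_codec_disable(encoder);
        disable_transcoder_and_port(encoder);
}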
 
static const struct {
int clock;
u32 config;
} hdmi_audio_clock[] = {
{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
 
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
if (mode->clock == hdmi_audio_clock[i].clock)
break;
}
 
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
i = 1;
}
 
DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
 
return hdmi_audio_clock[i].config;
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
int reg_edid)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t tmp;
int i;
 
tmp = I915_READ(reg_eldv);
tmp &= bits_eldv;
 
if (!tmp)
return false;
 
tmp = I915_READ(reg_elda);
tmp &= ~bits_elda;
I915_WRITE(reg_elda, tmp);
 
for (i = 0; i < drm_eld_size(eld) / 4; i++)
if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
return false;
 
return true;
}
 
static void g4x_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
uint32_t eldv, tmp;
 
DRM_DEBUG_KMS("Disable audio codec\n");
 
tmp = I915_READ(G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
 
/* Invalidate ELD */
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp &= ~eldv;
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
 
static void g4x_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
int len, i;
 
DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
 
tmp = I915_READ(G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
 
if (intel_eld_uptodate(connector,
G4X_AUD_CNTL_ST, eldv,
G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
G4X_HDMIW_HDMIEDID))
return;
 
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
len = (tmp >> 9) & 0x1f; /* ELD buffer size */
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
 
len = min(drm_eld_size(eld) / 4, len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
 
tmp = I915_READ(G4X_AUD_CNTL_ST);
tmp |= eldv;
I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}
 
static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp;
 
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
 
/* Disable timestamps */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
/* Invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~AUDIO_ELD_VALID(pipe);
tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
 
static void hsw_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
const uint8_t *eld = connector->eld;
uint32_t tmp;
int len, i;
 
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
pipe_name(pipe), drm_eld_size(eld));
 
/* Enable audio presence detect, invalidate ELD */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(pipe);
tmp &= ~AUDIO_ELD_VALID(pipe);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the
* rest of the setup into a vblank work item, queued here, but the
* infrastructure is not there yet.
*/
 
/* Reset ELD write address */
tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
tmp &= ~IBX_ELD_ADDRESS_MASK;
I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
 
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
 
/* ELD valid */
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_ELD_VALID(pipe);
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
 
/* Enable timestamps */
tmp = I915_READ(HSW_AUD_CFG(pipe));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(mode);
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
}
 
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
enum port port = intel_dig_port->port;
enum pipe pipe = intel_crtc->pipe;
uint32_t tmp, eldv;
int aud_config;
int aud_cntrl_st2;
 
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv)) {
aud_config = VLV_AUD_CFG(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
aud_config = CPT_AUD_CFG(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
/* Disable timestamps */
tmp = I915_READ(aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
 
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port);
}
 
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
tmp &= ~eldv;
I915_WRITE(aud_cntrl_st2, tmp);
}
 
static void ilk_audio_codec_enable(struct drm_connector *connector,
struct intel_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
enum port port = intel_dig_port->port;
enum pipe pipe = intel_crtc->pipe;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t tmp;
int len, i;
int hdmiw_hdmiedid;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
 
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
 
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the
* rest of the setup into a vblank work item, queued here, but the
* infrastructure is not there yet.
*/
 
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
aud_config = CPT_AUD_CFG(pipe);
aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
if (WARN_ON(!port)) {
eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
IBX_ELD_VALID(PORT_D);
} else {
eldv = IBX_ELD_VALID(port);
}
 
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
tmp &= ~eldv;
I915_WRITE(aud_cntrl_st2, tmp);
 
/* Reset ELD write address */
tmp = I915_READ(aud_cntl_st);
tmp &= ~IBX_ELD_ADDRESS_MASK;
I915_WRITE(aud_cntl_st, tmp);
 
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 
/* ELD valid */
tmp = I915_READ(aud_cntrl_st2);
tmp |= eldv;
I915_WRITE(aud_cntrl_st2, tmp);
 
/* Enable timestamps */
tmp = I915_READ(aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(mode);
I915_WRITE(aud_config, tmp);
}
 
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @intel_encoder: encoder on which to enable audio
*
* The enable sequences may only be performed after enabling the transcoder and
* port, and after completed link training.
*/
void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
struct drm_display_mode *mode = &crtc->config.adjusted_mode;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
connector = drm_select_eld(encoder, mode);
if (!connector)
return;
 
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
connector->name,
connector->encoder->base.id,
connector->encoder->name);
 
/* ELD Conn_Type */
connector->eld[5] &= ~(3 << 2);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
connector->eld[5] |= (1 << 2);
 
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
if (dev_priv->display.audio_codec_enable)
dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
}
 
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
* @encoder: encoder on which to disable audio
*
* The disable sequences must be performed before disabling the transcoder or
* port.
*/
void intel_audio_codec_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.audio_codec_disable)
dev_priv->display.audio_codec_disable(encoder);
}
 
/**
* intel_init_audio - Set up chip specific audio functions
* @dev: drm device
*/
void intel_init_audio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_G4X(dev)) {
dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
}
/drivers/video/drm/i915/intel_bios.c
627,16 → 627,16
 
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
646,16 → 646,16
 
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
946,7 → 946,7
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
port_name(port));
if (is_dvi && (port == PORT_A || port == PORT_E))
DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
if (!is_dvi && !is_dp && !is_crt)
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
port_name(port));
976,7 → 976,6
if (bdb->version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
hdmi_level_shift = child->raw[7] & 0xF;
if (hdmi_level_shift < 0xC) {
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
983,7 → 982,6
info->hdmi_level_shift = hdmi_level_shift;
}
}
}
 
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
1114,8 → 1112,7
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
 
/* Recommended BSpec default: 800mV 0dB. */
info->hdmi_level_shift = 6;
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
 
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
/drivers/video/drm/i915/intel_bios.h
46,7 → 46,7
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
};
} __packed;
 
/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
252,7 → 252,7
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
};
} __packed;
 
struct bdb_general_definitions {
/* DDC GPIO */
802,7 → 802,8
 
u16 rsvd4;
 
u8 rsvd5[5];
u8 rsvd5;
u32 target_burst_mode_freq;
u32 dsi_ddr_clk;
u32 bridge_ref_clk;
 
887,12 → 888,12
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};
} __packed;
 
struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};
} __packed;
 
/* Block 53 contains MIPI sequences as needed by the panel
* for enabling it. This block can be variable in size and
901,7 → 902,7
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
};
} __packed;
 
/* MIPI Sequence Block definitions */
enum mipi_seq {
/drivers/video/drm/i915/intel_crt.c
72,7 → 72,7
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(crt->adpa_reg);
775,7 → 775,7
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
 
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
 
/drivers/video/drm/i915/intel_ddi.c
28,89 → 28,131
#include "i915_drv.h"
#include "intel_drv.h"
 
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
};
 
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
static const u32 hsw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0006000E, /* DP parameters */
0x00D75FFF, 0x0005000A,
0x00C30FFF, 0x00040006,
0x80AAAFFF, 0x000B0000,
0x00FFFFFF, 0x0005000A,
0x00D75FFF, 0x000C0004,
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
{ 0x00FFFFFF, 0x0006000E },
{ 0x00D75FFF, 0x0005000A },
{ 0x00C30FFF, 0x00040006 },
{ 0x80AAAFFF, 0x000B0000 },
{ 0x00FFFFFF, 0x0005000A },
{ 0x00D75FFF, 0x000C0004 },
{ 0x80C30FFF, 0x000B0000 },
{ 0x00FFFFFF, 0x00040006 },
{ 0x80D75FFF, 0x000B0000 },
};
 
static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0007000E, /* FDI parameters */
0x00D75FFF, 0x000F000A,
0x00C30FFF, 0x00060006,
0x00AAAFFF, 0x001E0000,
0x00FFFFFF, 0x000F000A,
0x00D75FFF, 0x00160004,
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
{ 0x00FFFFFF, 0x0007000E },
{ 0x00D75FFF, 0x000F000A },
{ 0x00C30FFF, 0x00060006 },
{ 0x00AAAFFF, 0x001E0000 },
{ 0x00FFFFFF, 0x000F000A },
{ 0x00D75FFF, 0x00160004 },
{ 0x00C30FFF, 0x001E0000 },
{ 0x00FFFFFF, 0x00060006 },
{ 0x00D75FFF, 0x001E0000 },
};
 
static const u32 hsw_ddi_translations_hdmi[] = {
/* Idx NT mV diff T mV diff db */
0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */
0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */
0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */
0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */
0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */
0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */
0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */
0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */
0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */
0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */
0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */
0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */
static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV d db */
{ 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
{ 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
{ 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
{ 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
{ 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
{ 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
{ 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
{ 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
{ 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
{ 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
{ 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
{ 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
};
 
static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
{ 0x00FFFFFF, 0x00000012 },
{ 0x00EBAFFF, 0x00020011 },
{ 0x00C71FFF, 0x0006000F },
{ 0x00AAAFFF, 0x000E000A },
{ 0x00FFFFFF, 0x00020011 },
{ 0x00DB6FFF, 0x0005000F },
{ 0x00BEEFFF, 0x000A000C },
{ 0x00FFFFFF, 0x0005000F },
{ 0x00DB6FFF, 0x000A000C },
};
 
static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
{ 0x00FFFFFF, 0x0007000E },
{ 0x00D75FFF, 0x000E000A },
{ 0x00BEFFFF, 0x00140006 },
{ 0x80B2CFFF, 0x001B0002 },
{ 0x00FFFFFF, 0x000E000A },
{ 0x00DB6FFF, 0x00160005 },
{ 0x80C71FFF, 0x001A0002 },
{ 0x00F7DFFF, 0x00180004 },
{ 0x80D75FFF, 0x001B0002 },
};
 
static const u32 bdw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0001000E, /* FDI parameters */
0x00D75FFF, 0x0004000A,
0x00C30FFF, 0x00070006,
0x00AAAFFF, 0x000C0000,
0x00FFFFFF, 0x0004000A,
0x00D75FFF, 0x00090004,
0x00C30FFF, 0x000C0000,
0x00FFFFFF, 0x00070006,
0x00D75FFF, 0x000C0000,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
{ 0x00FFFFFF, 0x0001000E },
{ 0x00D75FFF, 0x0004000A },
{ 0x00C30FFF, 0x00070006 },
{ 0x00AAAFFF, 0x000C0000 },
{ 0x00FFFFFF, 0x0004000A },
{ 0x00D75FFF, 0x00090004 },
{ 0x00C30FFF, 0x000C0000 },
{ 0x00FFFFFF, 0x00070006 },
{ 0x00D75FFF, 0x000C0000 },
};
 
static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
/* Idx NT mV d T mV df db */
{ 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
{ 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
{ 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
{ 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
{ 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
{ 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
{ 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
{ 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
{ 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
{ 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
};
 
static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00000018, 0x000000a0 },
{ 0x00004014, 0x00000098 },
{ 0x00006012, 0x00000088 },
{ 0x00008010, 0x00000080 },
{ 0x00000018, 0x00000098 },
{ 0x00004014, 0x00000088 },
{ 0x00006012, 0x00000080 },
{ 0x00000018, 0x00000088 },
{ 0x00004014, 0x00000080 },
};
 
static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
/* Idx NT mV T mV db */
{ 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */
{ 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */
{ 0x00006012, 0x00000088 }, /* 2: 400 800 6 */
{ 0x00000018, 0x0000003c }, /* 3: 450 450 0 */
{ 0x00000018, 0x00000098 }, /* 4: 600 600 0 */
{ 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */
{ 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */
{ 0x00000018, 0x00000088 }, /* 7: 800 800 0 */
{ 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */
{ 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */
};
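
For illustration, a minimal standalone sketch (not driver code) of how one ddi_buf_trans entry maps onto the pair of consecutive 32-bit DDI_BUF_TRANS registers programmed by the translation loop below (see intel_prepare_ddi_buffers()); the base offset used here is an assumption made only for this example:

#include <stdint.h>
#include <stdio.h>

struct ddi_buf_trans {
	uint32_t trans1;	/* balance leg enable, de-emph level */
	uint32_t trans2;	/* vref sel, vswing */
};

int main(void)
{
	const uint32_t ddi_buf_trans_base = 0x64E00;	/* assumed port A base, illustration only */
	const struct ddi_buf_trans entry = { 0x00FFFFFF, 0x0006000E };	/* first DP entry above */
	unsigned int i = 0;	/* table index */

	/* entry i occupies two registers, 4 bytes apart, starting at base + 8*i */
	printf("write 0x%08X to 0x%05X\n", entry.trans1, ddi_buf_trans_base + 8 * i);
	printf("write 0x%08X to 0x%05X\n", entry.trans2, ddi_buf_trans_base + 8 * i + 4);
	return 0;
}
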
 
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
145,26 → 187,43
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
int i, n_hdmi_entries, hdmi_800mV_0dB;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const u32 *ddi_translations_fdi;
const u32 *ddi_translations_dp;
const u32 *ddi_translations_edp;
const u32 *ddi_translations;
const struct ddi_buf_trans *ddi_translations_fdi;
const struct ddi_buf_trans *ddi_translations_dp;
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations_hdmi;
const struct ddi_buf_trans *ddi_translations;
 
if (IS_BROADWELL(dev)) {
if (IS_SKYLAKE(dev)) {
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
ddi_translations_edp = skl_ddi_translations_dp;
ddi_translations_hdmi = skl_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
} else if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
} else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
hdmi_800mV_0dB = 6;
} else {
WARN(1, "ddi translation table missing\n");
ddi_translations_edp = bdw_ddi_translations_dp;
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7;
}
 
switch (port) {
182,7 → 241,10
ddi_translations = ddi_translations_dp;
break;
case PORT_E:
if (ddi_translations_fdi)
ddi_translations = ddi_translations_fdi;
else
ddi_translations = ddi_translations_dp;
break;
default:
BUG();
190,15 → 252,23
 
for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
reg += 4;
}
 
/* Choose a good default if VBT is badly populated */
if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN ||
hdmi_level >= n_hdmi_entries)
hdmi_level = hdmi_800mV_0dB;
 
/* Entry 9 is for HDMI: */
for (i = 0; i < 2; i++) {
I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
reg += 4;
}
}
 
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
214,18 → 284,6
intel_prepare_ddi_buffers(dev, port);
}
 
static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_400MV_0DB_HSW,
DDI_BUF_EMP_400MV_3_5DB_HSW,
DDI_BUF_EMP_400MV_6DB_HSW,
DDI_BUF_EMP_400MV_9_5DB_HSW,
DDI_BUF_EMP_600MV_0DB_HSW,
DDI_BUF_EMP_600MV_3_5DB_HSW,
DDI_BUF_EMP_600MV_6DB_HSW,
DDI_BUF_EMP_800MV_0DB_HSW,
DDI_BUF_EMP_800MV_3_5DB_HSW
};
 
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
285,7 → 343,7
 
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
300,7 → 358,7
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->config.fdi_lanes - 1) << 1) |
hsw_ddi_buf_ctl_values[i / 2]);
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
 
udelay(600);
375,7 → 433,7
enc_to_dig_port(&encoder->base);
 
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 
}
401,8 → 459,29
return ret;
}
 
static struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *intel_encoder, *ret = NULL;
int num_encoders = 0;
 
for_each_intel_encoder(dev, intel_encoder) {
if (intel_encoder->new_crtc == crtc) {
ret = intel_encoder;
num_encoders++;
}
}
 
WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
pipe_name(crtc->pipe));
 
BUG_ON(ret == NULL);
return ret;
}
 
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
#define P_MIN 2
#define P_MAX 64
414,7 → 493,11
#define VCO_MIN 2400
#define VCO_MAX 4800
 
#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a))
#define abs_diff(a, b) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
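
A minimal standalone sketch of the same statement-expression idiom (GCC extensions assumed, not driver code): each argument is evaluated exactly once, and the pointer comparison exists only so the compiler warns when the two operands have incompatible types:

#include <stdio.h>

#define abs_diff(a, b) ({ \
	typeof(a) __a = (a); \
	typeof(b) __b = (b); \
	(void) (&__a == &__b); \
	__a > __b ? (__a - __b) : (__b - __a); })

int main(void)
{
	unsigned int x = 3, y = 10;

	/* x++ is evaluated once, so the macro is safe with side effects */
	printf("%u\n", abs_diff(x++, y));	/* prints 7, x is now 4 */
	return 0;
}
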
 
struct wrpll_rnp {
unsigned p, n2, r2;
524,9 → 607,9
*/
a = freq2k * budget * p * r2;
b = freq2k * budget * best->p * best->r2;
diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2));
diff_best = ABS_DIFF((freq2k * best->p * best->r2),
(LC_FREQ_2K * best->n2));
diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
diff_best = abs_diff(freq2k * best->p * best->r2,
LC_FREQ_2K * best->n2);
c = 1000000 * diff;
d = 1000000 * diff_best;
 
587,11 → 670,116
return (refclk * n * 100) / (p * r);
}
 
void intel_ddi_clock_get(struct intel_encoder *encoder,
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t dpll)
{
uint32_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;
 
cfgcr1_reg = GET_CFG_CR1_REG(dpll);
cfgcr2_reg = GET_CFG_CR2_REG(dpll);
 
cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
 
p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
 
if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
 
 
switch (p0) {
case DPLL_CFGCR2_PDIV_1:
p0 = 1;
break;
case DPLL_CFGCR2_PDIV_2:
p0 = 2;
break;
case DPLL_CFGCR2_PDIV_3:
p0 = 3;
break;
case DPLL_CFGCR2_PDIV_7:
p0 = 7;
break;
}
 
switch (p2) {
case DPLL_CFGCR2_KDIV_5:
p2 = 5;
break;
case DPLL_CFGCR2_KDIV_2:
p2 = 2;
break;
case DPLL_CFGCR2_KDIV_3:
p2 = 3;
break;
case DPLL_CFGCR2_KDIV_1:
p2 = 1;
break;
}
 
dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
 
dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1000) / 0x8000;
 
return dco_freq / (p0 * p1 * p2 * 5);
}
 
 
static void skl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
uint32_t dpll_ctl1, dpll;
 
dpll = pipe_config->ddi_pll_sel;
 
dpll_ctl1 = I915_READ(DPLL_CTRL1);
 
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else {
link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
 
switch (link_clock) {
case DPLL_CRTL1_LINK_RATE_810:
link_clock = 81000;
break;
case DPLL_CRTL1_LINK_RATE_1350:
link_clock = 135000;
break;
case DPLL_CRTL1_LINK_RATE_2700:
link_clock = 270000;
break;
default:
WARN(1, "Unsupported link rate\n");
break;
}
link_clock *= 2;
}
 
pipe_config->port_clock = link_clock;
 
if (pipe_config->has_dp_encoder)
pipe_config->adjusted_mode.crtc_clock =
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
else
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
 
static void hsw_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
int link_clock = 0;
u32 val, pll;
 
val = pipe_config->ddi_pll_sel;
643,8 → 831,14
pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
}
 
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
hsw_ddi_clock_get(encoder, pipe_config);
}
 
static void
intel_ddi_calculate_wrpll(int clock /* in Hz */,
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
uint64_t freq2k;
708,33 → 902,23
*r2_out = best.r2;
}
 
/*
* Tries to find a PLL for the CRTC. If it finds, it increases the refcount and
* stores it in intel_crtc->ddi_pll_sel, so other mode sets won't be able to
* steal the selected PLL. You need to call intel_ddi_pll_enable to actually
* enable the PLL.
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder,
int clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
int type = intel_encoder->type;
int clock = intel_crtc->config.port_clock;
 
intel_put_shared_dpll(intel_crtc);
 
if (type == INTEL_OUTPUT_HDMI) {
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct intel_shared_dpll *pll;
uint32_t val;
unsigned p, n2, r2;
 
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
intel_crtc->config.dpll_hw_state.wrpll = val;
intel_crtc->new_config->dpll_hw_state.wrpll = val;
 
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
743,12 → 927,255
return false;
}
 
intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
 
return true;
}
 
struct skl_wrpll_params {
uint32_t dco_fraction;
uint32_t dco_integer;
uint32_t qdiv_ratio;
uint32_t qdiv_mode;
uint32_t kdiv;
uint32_t pdiv;
uint32_t central_freq;
};
 
static void
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
uint64_t dco_central_freq[3] = {8400000000ULL,
9000000000ULL,
9600000000ULL};
uint32_t min_dco_deviation = 400;
uint32_t min_dco_index = 3;
uint32_t P0[4] = {1, 2, 3, 7};
uint32_t P2[4] = {1, 2, 3, 5};
bool found = false;
uint32_t candidate_p = 0;
uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
uint32_t candidate_p2[3] = {0};
uint32_t dco_central_freq_deviation[3];
uint32_t i, P1, k, dco_count;
bool retry_with_odd = false;
uint64_t dco_freq;
 
/* Determine P0, P1 or P2 */
for (dco_count = 0; dco_count < 3; dco_count++) {
found = false;
candidate_p =
div64_u64(dco_central_freq[dco_count], afe_clock);
if (retry_with_odd == false)
candidate_p = (candidate_p % 2 == 0 ?
candidate_p : candidate_p + 1);
 
for (P1 = 1; P1 < candidate_p; P1++) {
for (i = 0; i < 4; i++) {
if (!(P0[i] != 1 || P1 == 1))
continue;
 
for (k = 0; k < 4; k++) {
if (P1 != 1 && P2[k] != 2)
continue;
 
if (candidate_p == P0[i] * P1 * P2[k]) {
/* Found possible P0, P1, P2 */
found = true;
candidate_p0[dco_count] = P0[i];
candidate_p1[dco_count] = P1;
candidate_p2[dco_count] = P2[k];
goto found;
}
 
}
}
}
 
found:
if (found) {
dco_central_freq_deviation[dco_count] =
div64_u64(10000 *
abs_diff((candidate_p * afe_clock),
dco_central_freq[dco_count]),
dco_central_freq[dco_count]);
 
if (dco_central_freq_deviation[dco_count] <
min_dco_deviation) {
min_dco_deviation =
dco_central_freq_deviation[dco_count];
min_dco_index = dco_count;
}
}
 
if (min_dco_index > 2 && dco_count == 2) {
retry_with_odd = true;
dco_count = 0;
}
}
 
if (min_dco_index > 2) {
WARN(1, "No valid values found for the given pixel clock\n");
} else {
wrpll_params->central_freq = dco_central_freq[min_dco_index];
 
switch (dco_central_freq[min_dco_index]) {
case 9600000000ULL:
wrpll_params->central_freq = 0;
break;
case 9000000000ULL:
wrpll_params->central_freq = 1;
break;
case 8400000000ULL:
wrpll_params->central_freq = 3;
}
 
switch (candidate_p0[min_dco_index]) {
case 1:
wrpll_params->pdiv = 0;
break;
case 2:
wrpll_params->pdiv = 1;
break;
case 3:
wrpll_params->pdiv = 2;
break;
case 7:
wrpll_params->pdiv = 4;
break;
default:
WARN(1, "Incorrect PDiv\n");
}
 
switch (candidate_p2[min_dco_index]) {
case 5:
wrpll_params->kdiv = 0;
break;
case 2:
wrpll_params->kdiv = 1;
break;
case 3:
wrpll_params->kdiv = 2;
break;
case 1:
wrpll_params->kdiv = 3;
break;
default:
WARN(1, "Incorrect KDiv\n");
}
 
wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
wrpll_params->qdiv_mode =
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
 
dco_freq = candidate_p0[min_dco_index] *
candidate_p1[min_dco_index] *
candidate_p2[min_dco_index] * afe_clock;
 
/*
* Intermediate values are in Hz.
* Divide by MHz to match BSpec
*/
wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
wrpll_params->dco_fraction =
div_u64(((div_u64(dco_freq, 24) -
wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
 
}
}
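
As a plausibility check of the two conversions, a standalone arithmetic sketch (not driver code) that derives the DCO integer/fraction words the way skl_ddi_calculate_wrpll() does for a hypothetical 148.5 MHz pixel clock and then reconstructs the link clock with the readback formula from skl_calc_wrpll_link():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock = 148500000ULL;		/* hypothetical 148.5 MHz pixel clock */
	uint64_t afe = clock * 5;		/* AFE clock is 5x pixel clock */
	uint64_t p0 = 3, p1 = 2, p2 = 2;	/* dividers picked for the 9.0 GHz DCO central freq */
	uint64_t dco = p0 * p1 * p2 * afe;	/* 8.91 GHz DCO frequency */

	uint32_t dco_integer = dco / 24000000ULL;				/* 371 */
	uint32_t dco_fraction = ((dco / 24 - dco_integer * 1000000ULL)
				 * 0x8000) / 1000000ULL;			/* 0x2000 */

	/* readback path: DCO in kHz, then divide by p0*p1*p2*5 */
	uint64_t dco_khz = dco_integer * 24 * 1000
			 + (dco_fraction * 24 * 1000) / 0x8000;
	printf("link clock = %llu kHz\n",
	       (unsigned long long)(dco_khz / (p0 * p1 * p2 * 5)));	/* 148500 */
	return 0;
}
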
 
 
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder,
int clock)
{
struct intel_shared_dpll *pll;
uint32_t ctrl1, cfgcr1, cfgcr2;
 
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
* as the DPLL id in this function.
*/
 
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
 
if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
struct skl_wrpll_params wrpll_params = { 0, };
 
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
 
skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
 
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
wrpll_params.dco_integer;
 
cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
struct drm_encoder *encoder = &intel_encoder->base;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
break;
}
 
cfgcr1 = cfgcr2 = 0;
} else /* eDP */
return true;
 
intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
 
pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
 
/* shared DPLL id 0 is DPLL 1 */
intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
 
return true;
}
 
/*
* Tries to find a *shared* PLL for the CRTC and store it in
* intel_crtc->ddi_pll_sel.
*
* For private DPLLs, compute_config() should do the selection for us. This
* function should be folded into compute_config() eventually.
*/
bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(intel_crtc);
int clock = intel_crtc->new_config->port_clock;
 
if (IS_SKYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
else
return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
}
 
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
921,7 → 1348,7
uint32_t tmp;
 
power_domain = intel_display_port_power_domain(intel_encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
967,7 → 1394,7
int i;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(DDI_BUF_CTL(port));
1038,27 → 1465,53
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
 
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
pipe_name(crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
intel_write_eld(encoder, &crtc->config.adjusted_mode);
}
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
 
if (IS_SKYLAKE(dev)) {
uint32_t dpll = crtc->config.ddi_pll_sel;
uint32_t val;
 
/*
* DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL
*/
if (type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0);
 
val = I915_READ(DPLL_CTRL1);
 
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
 
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
}
 
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
 
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
 
I915_WRITE(DPLL_CTRL2, val);
 
} else {
WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
}
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1068,7 → 1521,7
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A)
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1082,7 → 1535,8
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
1110,6 → 1564,10
intel_edp_panel_off(intel_dp);
}
 
if (IS_SKYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
 
1118,12 → 1576,10
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t tmp;
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
1139,18 → 1595,16
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
if (port == PORT_A)
if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
intel_dp_stop_link_train(intel_dp);
 
intel_edp_backlight_on(intel_dp);
intel_edp_psr_enable(intel_dp);
intel_psr_enable(intel_dp);
}
 
if (intel_crtc->config.has_audio) {
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
intel_audio_codec_enable(intel_encoder);
}
}
 
1159,19 → 1613,12
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
/* We can't touch HSW_AUD_PIN_ELD_CP_VLD unconditionally because this
* register is part of the power well on Haswell. */
if (intel_crtc->config.has_audio) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
(pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
intel_audio_codec_disable(intel_encoder);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
 
1178,42 → 1625,114
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_edp_psr_disable(intel_dp);
intel_psr_disable(intel_dp);
intel_edp_backlight_off(intel_dp);
}
}
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
uint32_t cdctl = I915_READ(CDCLK_CTL);
uint32_t linkrate;
 
if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
WARN(1, "LCPLL1 not enabled\n");
return 24000; /* 24MHz is the cd freq with NSSC ref */
}
 
if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
return 540000;
 
linkrate = (I915_READ(DPLL_CTRL1) &
DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
 
if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
linkrate == DPLL_CRTL1_LINK_RATE_1080) {
/* vco 8640 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 432000;
case CDCLK_FREQ_337_308:
return 308570;
case CDCLK_FREQ_675_617:
return 617140;
default:
WARN(1, "Unknown cd freq selection\n");
}
} else {
/* vco 8100 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
return 450000;
case CDCLK_FREQ_337_308:
return 337500;
case CDCLK_FREQ_675_617:
return 675000;
default:
WARN(1, "Unknown cd freq selection\n");
}
}
 
/* error case, do as if DPLL0 isn't enabled */
return 24000;
}
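
For illustration only, the CDCLK decode above restated as a small lookup table, assuming the same two VCO classes and the three variable CDCLK_FREQ_SEL encodings (the fixed 540 MHz setting is handled separately):

#include <stdio.h>

int main(void)
{
	/* rows: VCO 8640 vs VCO 8100; columns: CDCLK_FREQ_450_432,
	 * CDCLK_FREQ_337_308, CDCLK_FREQ_675_617; frequencies in kHz */
	static const int cdclk_khz[2][3] = {
		{ 432000, 308570, 617140 },	/* VCO 8640 */
		{ 450000, 337500, 675000 },	/* VCO 8100 */
	};
	int vco8640 = 1, sel = 0;	/* example: VCO 8640, CDCLK_FREQ_450_432 */

	printf("cdclk = %d kHz\n", cdclk_khz[vco8640 ? 0 : 1][sel]);	/* 432000 */
	return 0;
}
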
 
static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK) {
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
} else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) {
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
} else if (freq == LCPLL_CLK_FREQ_450) {
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
} else if (IS_HASWELL(dev)) {
if (IS_ULT(dev))
return 337500;
else
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
return 540000;
} else {
if (freq == LCPLL_CLK_FREQ_54O_BDW)
return 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
return 337500;
else
return 675000;
}
 
static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
uint32_t lcpll = I915_READ(LCPLL_CTL);
uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
 
if (lcpll & LCPLL_CD_SOURCE_FCLK)
return 800000;
else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
else if (freq == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_HSW_ULT(dev))
return 337500;
else
return 540000;
}
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
if (IS_SKYLAKE(dev))
return skl_get_cdclk_freq(dev_priv);
 
if (IS_BROADWELL(dev))
return bdw_get_cdclk_freq(dev_priv);
 
/* Haswell */
return hsw_get_cdclk_freq(dev_priv);
}
 
static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
1234,7 → 1753,7
{
uint32_t val;
 
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(WRPLL_CTL(pll->id));
1248,10 → 1767,8
"WRPLL 2",
};
 
void intel_ddi_pll_init(struct drm_device *dev)
static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
int i;
 
dev_priv->num_shared_dpll = 2;
1264,15 → 1781,152
dev_priv->shared_dplls[i].get_hw_state =
hsw_ddi_pll_get_hw_state;
}
}
 
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
* Don't even try to turn it on.
*/
static const char * const skl_ddi_pll_names[] = {
"DPLL 1",
"DPLL 2",
"DPLL 3",
};
 
struct skl_dpll_regs {
u32 ctl, cfgcr1, cfgcr2;
};
 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[3] = {
{
/* DPLL 1 */
.ctl = LCPLL2_CTL,
.cfgcr1 = DPLL1_CFGCR1,
.cfgcr2 = DPLL1_CFGCR2,
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.cfgcr1 = DPLL2_CFGCR1,
.cfgcr2 = DPLL2_CFGCR2,
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.cfgcr1 = DPLL3_CFGCR1,
.cfgcr2 = DPLL3_CFGCR2,
},
};
 
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
 
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
 
val = I915_READ(DPLL_CTRL1);
 
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CRTL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6);
 
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
 
I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
POSTING_READ(regs[pll->id].cfgcr1);
POSTING_READ(regs[pll->id].cfgcr2);
 
/* the enable bit is always bit 31 */
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
 
if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
DRM_ERROR("DPLL %d not locked\n", dpll);
}
 
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const struct skl_dpll_regs *regs = skl_dpll_regs;
 
/* the enable bit is always bit 31 */
I915_WRITE(regs[pll->id].ctl,
I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
POSTING_READ(regs[pll->id].ctl);
}
 
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
 
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
 
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
return false;
 
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
 
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
 
return true;
}
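
For illustration, a standalone sketch (not driver code) of the 6-bit-per-DPLL field packing in DPLL_CTRL1 that the shift-by-(dpll * 6) code above assumes, using an example field value of 0x01:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dpll_ctrl1 = 0;
	unsigned int dpll = 1;		/* DPLL1, i.e. shared pll id 0 */
	uint32_t ctrl1 = 0x01;		/* example 6-bit field value */

	/* program: clear this DPLL's 6-bit field, then insert the new value */
	dpll_ctrl1 &= ~(0x3fu << (dpll * 6));
	dpll_ctrl1 |= ctrl1 << (dpll * 6);

	/* readback: extract the same 6-bit field, as the get_hw_state hook does */
	printf("ctrl1 for DPLL%u = 0x%02x\n", dpll,
	       (dpll_ctrl1 >> (dpll * 6)) & 0x3f);	/* 0x01 */
	return 0;
}
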
 
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
{
int i;
 
dev_priv->num_shared_dpll = 3;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
dev_priv->shared_dplls[i].get_hw_state =
skl_ddi_pll_get_hw_state;
}
}
 
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
 
if (IS_SKYLAKE(dev))
skl_shared_dplls_init(dev_priv);
else
hsw_shared_dplls_init(dev_priv);
 
DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
 
if (IS_SKYLAKE(dev)) {
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
} else {
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case
* something is wrong. Don't even try to turn it on.
*/
 
if (val & LCPLL_CD_SOURCE_FCLK)
DRM_ERROR("CDCLK source is not LCPLL\n");
 
1279,6 → 1933,7
if (val & LCPLL_PLL_DISABLE)
DRM_ERROR("LCPLL is disabled\n");
}
}
 
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
{
1372,7 → 2027,9
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
struct drm_device *dev = dev_priv->dev;
 
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
1406,6 → 2063,11
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
if (intel_hdmi->infoframe_enabled(&encoder->base))
pipe_config->has_infoframe = true;
break;
case TRANS_DDI_MODE_SELECT_DVI:
case TRANS_DDI_MODE_SELECT_FDI:
break;
1418,9 → 2080,9
break;
}
 
if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4)))
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
pipe_config->has_audio = true;
}
 
1444,7 → 2106,10
dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
}
 
intel_ddi_clock_get(encoder, pipe_config);
if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else
skl_ddi_clock_get(encoder, pipe_config);
}
 
static void intel_ddi_destroy(struct drm_encoder *encoder)
/drivers/video/drm/i915/intel_display.c
30,7 → 30,7
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
43,11 → 43,6
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
 
static inline void ndelay(unsigned long x)
{
udelay(DIV_ROUND_UP(x, 1000));
}
 
/* Primary plane formats supported by all gen */
#define COMMON_PRIMARY_FORMATS \
DRM_FORMAT_C8, \
78,25 → 73,6
DRM_FORMAT_ARGB8888,
};
 
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
#define MAX_ERRNO 4095
phys_addr_t get_bus_addr(void);
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe);
void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
110,15 → 86,18
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config);
 
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
429,12 → 408,12
/**
* Returns whether any output on the specified pipe is of the specified type
*/
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
for_each_encoder_on_crtc(dev, crtc, encoder)
for_each_encoder_on_crtc(dev, &crtc->base, encoder)
if (encoder->type == type)
return true;
 
441,13 → 420,31
return false;
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
/**
* Returns whether any output on the specified pipe will have the specified
* type after a staged modeset is complete, i.e., the same as
* intel_pipe_has_type() but looking at encoder->new_crtc instead of
* encoder->crtc.
*/
static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
{
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
for_each_intel_encoder(dev, encoder)
if (encoder->new_crtc == crtc && encoder->type == type)
return true;
 
return false;
}
 
static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
465,20 → 462,20
return limit;
}
 
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
486,9 → 483,9
return limit;
}
 
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
const intel_limit_t *limit;
 
if (HAS_PCH_SPLIT(dev))
496,7 → 493,7
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
505,14 → 502,14
} else if (IS_VALLEYVIEW(dev)) {
limit = &intel_limits_vlv;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
else
limit = &intel_limits_i8xx_dac;
599,15 → 596,15
}
 
static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
660,15 → 657,15
}
 
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
719,11 → 716,11
}
 
static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
int max_n;
bool found;
731,7 → 728,7
int err_most = (target >> 8) + (target >> 9);
found = false;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
776,11 → 773,11
}
 
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
unsigned int bestppm = 1000000;
/* min update 19.2 MHz */
833,11 → 830,11
}
 
static bool
chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock;
uint64_t m2;
int found = false;
910,58 → 907,6
return intel_crtc->config.cpu_transcoder;
}
 
static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
 
frame = I915_READ(frame_reg);
 
if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
WARN(1, "vblank wait timed out\n");
}
 
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
* @pipe: pipe to wait for
*
* Wait for vblank to occur on a given pipe. Needed for various bits of
* mode setting code.
*/
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
g4x_wait_for_vblank(dev, pipe);
return;
}
 
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
* This races with i915_driver_irq_handler() with the result
* that either function could miss a vblank event. Here it is not
* fatal, as we will either wait upon the next vblank interrupt or
* timeout. Generally speaking intel_wait_for_vblank() is only
* called during modeset at which time the GPU should be idle and
* should *not* be performing page flips and thus not waiting on
* vblanks...
* Currently, the result of us stealing a vblank from the irq
* handler is that a single frame will be skipped during swapbuffers.
*/
I915_WRITE(pipestat_reg,
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
 
/* Wait for vblank interrupt bit to set */
if (wait_for(I915_READ(pipestat_reg) &
PIPE_VBLANK_INTERRUPT_STATUS,
50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
 
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
983,8 → 928,7
 
/*
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
* @pipe: pipe to wait for
* @crtc: crtc whose pipe to wait for
*
* After disabling a pipe, we can't wait for vblank in the usual way,
* spinning on the vblank interrupt status bit, since we won't actually
998,11 → 942,12
* ends up stopping at the start of the next frame).
*
*/
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum pipe pipe = crtc->pipe;
 
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(cpu_transcoder);
1208,30 → 1153,43
state_string(state), state_string(cur_state));
}
 
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int pp_reg, lvds_reg;
struct drm_device *dev = dev_priv->dev;
int pp_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
 
if (HAS_PCH_SPLIT(dev_priv->dev)) {
if (WARN_ON(HAS_DDI(dev)))
return;
 
if (HAS_PCH_SPLIT(dev)) {
u32 port_sel;
 
pp_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
 
if (port_sel == PANEL_PORT_SELECT_LVDS &&
I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
/* XXX: else fix for eDP */
} else if (IS_VALLEYVIEW(dev)) {
/* presumably write lock depends on pipe, not port select */
pp_reg = VLV_PIPE_PP_CONTROL(pipe);
panel_pipe = pipe;
} else {
pp_reg = PP_CONTROL;
lvds_reg = LVDS;
if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
}
 
val = I915_READ(pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
 
if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
 
WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
1264,11 → 1222,12
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
 
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
cur_state = false;
} else {
1319,7 → 1278,7
}
 
/* Need to check both planes against the pipe */
for_each_pipe(i) {
for_each_pipe(dev_priv, i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1337,8 → 1296,15
int reg, sprite;
u32 val;
 
if (IS_VALLEYVIEW(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
for_each_sprite(pipe, sprite) {
val = I915_READ(PLANE_CTL(pipe, sprite));
WARN(val & PLANE_CTL_ENABLE,
"plane %d assertion failure, should be off on pipe %c but is still active\n",
sprite, pipe_name(pipe));
}
} else if (IS_VALLEYVIEW(dev)) {
for_each_sprite(pipe, sprite) {
reg = SPCNTR(pipe, sprite);
val = I915_READ(reg);
WARN(val & SP_ENABLE,
1360,6 → 1326,12
}
}
 
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
drm_crtc_vblank_put(crtc);
}
 
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
1532,40 → 1504,13
}
}
 
static void intel_reset_dpio(struct drm_device *dev)
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
 
for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
/* Poll for phypwrgood signal */
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
 
/*
* Deassert common lane reset for PHY.
*
* This should only be done on init and resume from S3
* with both PLLs disabled, or we risk losing DPIO and
* PLL synchronization.
*/
val = I915_READ(DISPLAY_PHY_CONTROL);
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
}
}
 
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int reg = DPLL(crtc->pipe);
u32 dpll = crtc->config.dpll_hw_state.dpll;
u32 dpll = pipe_config->dpll_hw_state.dpll;
 
assert_pipe_disabled(dev_priv, crtc->pipe);
 
1573,7 → 1518,7
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
if (IS_MOBILE(dev_priv->dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
 
I915_WRITE(reg, dpll);
1583,7 → 1528,7
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
 
I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(crtc->pipe));
 
/* We do this three times for luck */
1598,7 → 1543,8
udelay(150); /* wait for warmup */
}
 
static void chv_enable_pll(struct intel_crtc *crtc)
static void chv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1623,7 → 1569,7
udelay(1);
 
/* Enable PLL */
I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll);
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
 
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1630,12 → 1576,24
DRM_ERROR("PLL %d failed to lock\n", pipe);
 
/* not sure when this should be written */
I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md);
I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static int intel_num_dvo_pipes(struct drm_device *dev)
{
struct intel_crtc *crtc;
int count = 0;
 
for_each_intel_crtc(dev, crtc)
count += crtc->active &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
 
return count;
}
 
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
1652,7 → 1610,18
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
 
I915_WRITE(reg, dpll);
/* Enable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
/*
* It appears to be important that we don't enable this
* for the current pipe before otherwise configuring the
* PLL. No idea how this should be handled if multiple
* DVO outputs are enabled simultaneously.
*/
dpll |= DPLL_DVO_2X_MODE;
I915_WRITE(DPLL(!crtc->pipe),
I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
}
 
/* Wait for the clocks to stabilize. */
POSTING_READ(reg);
1691,10 → 1660,25
*
* Note! This is for pre-ILK only.
*/
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = crtc->pipe;
 
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
intel_num_dvo_pipes(dev) == 1) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
I915_WRITE(DPLL(PIPE_A),
I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
}
 
/* Don't disable pipe or pipe PLLs if needed */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
return;
 
/* Make sure the pipe isn't still relying on us */
1731,7 → 1715,7
assert_pipe_disabled(dev_priv, pipe);
 
/* Set PLL en = 0 */
val = DPLL_SSC_REF_CLOCK_CHV;
val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
1795,7 → 1779,7
if (WARN_ON(pll == NULL))
return;
 
WARN_ON(!pll->refcount);
WARN_ON(!pll->config.crtc_mask);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
1822,7 → 1806,7
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
if (WARN_ON(pll->config.crtc_mask == 0))
return;
 
DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
1843,7 → 1827,7
pll->on = true;
}
 
void intel_disable_shared_dpll(struct intel_crtc *crtc)
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1854,7 → 1838,7
if (WARN_ON(pll == NULL))
return;
 
if (WARN_ON(pll->refcount == 0))
if (WARN_ON(pll->config.crtc_mask == 0))
return;
 
DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1887,7 → 1871,7
uint32_t reg, val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(INTEL_INFO(dev)->gen < 5);
BUG_ON(!HAS_PCH_SPLIT(dev));
 
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv,
1922,7 → 1906,7
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
1940,7 → 1924,7
u32 val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(INTEL_INFO(dev_priv->dev)->gen < 5);
BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2045,7 → 2029,7
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
2062,8 → 2046,8
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
WARN_ON(!(pipe == PIPE_A &&
dev_priv->quirks & QUIRK_PIPEA_FORCE));
WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
return;
}
 
2073,21 → 2057,19
 
/**
* intel_disable_pipe - disable a pipe, asserting requirements
* @dev_priv: i915 private structure
* @pipe: pipe to disable
* @crtc: crtc whose pipe is to be disabled
*
* Disable @pipe, making sure that various hardware specific requirements
* are met, if applicable, e.g. plane disabled, panel fitter off, etc.
* Disable the pipe of @crtc, making sure that various hardware
* specific requirements are met, if applicable, e.g. plane
* disabled, panel fitter off, etc.
*
* @pipe should be %PIPE_A or %PIPE_B.
*
* Will wait until the pipe has shut down before returning.
*/
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
static void intel_disable_pipe(struct intel_crtc *crtc)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
enum pipe pipe = crtc->pipe;
int reg;
u32 val;
 
2099,17 → 2081,26
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
 
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
 
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
 
I915_WRITE(reg, val & ~PIPECONF_ENABLE);
intel_wait_for_pipe_off(dev_priv->dev, pipe);
/*
* Double wide has implications for planes
* so best keep it disabled when not needed.
*/
if (crtc->config.double_wide)
val &= ~PIPECONF_DOUBLE_WIDE;
 
/* Don't disable pipe or pipe PLLs if needed */
if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
!(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
val &= ~PIPECONF_ENABLE;
 
I915_WRITE(reg, val);
if ((val & PIPECONF_ENABLE) == 0)
intel_wait_for_pipe_off(crtc);
}
 
/*
2128,23 → 2119,20
 
/**
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
* @plane: plane to be enabled
* @crtc: crtc for the plane
*
* Enable @plane on @pipe, making sure that @pipe is running first.
* Enable @plane on @crtc, making sure that the pipe is running first.
*/
static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
 
if (intel_crtc->primary_enabled)
return;
2151,41 → 2139,41
 
intel_crtc->primary_enabled = true;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
WARN_ON(val & DISPLAY_PLANE_ENABLE);
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
 
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
/**
* intel_disable_primary_hw_plane - disable the primary hardware plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
* @plane: plane to be disabled
* @crtc: crtc for the plane
*
* Disable @plane; should be an independent operation.
* Disable @plane on @crtc, making sure that the pipe is running first.
*/
static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
int reg;
u32 val;
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
 
if (!intel_crtc->primary_enabled)
return;
 
intel_crtc->primary_enabled = false;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
 
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_primary_plane(dev_priv, plane);
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
}
 
static bool need_vtd_wa(struct drm_device *dev)
2206,11 → 2194,13
}
 
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 alignment;
int ret;
 
2218,7 → 2208,9
 
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
2226,8 → 2218,12
alignment = 64 * 1024;
break;
case I915_TILING_X:
if (INTEL_INFO(dev)->gen >= 9)
alignment = 256 * 1024;
else {
/* pin() will align the object as required by fence */
alignment = 0;
}
break;
case I915_TILING_Y:
WARN(1, "Y tiled bo slipped through, driver bug!\n");
2389,6 → 2385,7
struct intel_plane_config *plane_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct intel_crtc *i;
struct drm_i915_gem_object *obj;
2420,6 → 2417,9
continue;
 
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
 
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
2435,16 → 2435,52
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
u32 reg = DSPCNTR(plane);
int pixel_size;
 
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
if (!intel_crtc->primary_enabled) {
I915_WRITE(reg, 0);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
else
I915_WRITE(DSPADDR(plane), 0);
POSTING_READ(reg);
return;
}
 
obj = intel_fb_obj(fb);
if (WARN_ON(obj == NULL))
return;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
dspcntr |= DISPLAY_PLANE_ENABLE;
 
if (INTEL_INFO(dev)->gen < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
I915_WRITE(PRIMSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(PRIMPOS(plane), 0);
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
 
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
2476,24 → 2512,19
BUG();
}
 
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
if (INTEL_INFO(dev)->gen >= 4 &&
obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
}
 
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
linear_offset = y * fb->pitches[0] + x * pixel_size;
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
fb->bits_per_pixel / 8,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
2500,6 → 2531,21
intel_crtc->dspaddr_offset = linear_offset;
}
 
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
x += (intel_crtc->config.pipe_src_w - 1);
y += (intel_crtc->config.pipe_src_h - 1);
 
/* Finding the last pixel of the last line of the display
 * data and adding it to linear_offset */
linear_offset +=
(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config.pipe_src_w - 1) * pixel_size;
}
 
I915_WRITE(reg, dspcntr);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
2521,16 → 2567,33
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
u32 reg = DSPCNTR(plane);
int pixel_size;
 
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
if (!intel_crtc->primary_enabled) {
I915_WRITE(reg, 0);
I915_WRITE(DSPSURF(plane), 0);
POSTING_READ(reg);
return;
}
 
obj = intel_fb_obj(fb);
if (WARN_ON(obj == NULL))
return;
 
pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
dspcntr |= DISPLAY_PLANE_ENABLE;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
 
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
2560,23 → 2623,33
 
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
else
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
I915_WRITE(reg, dspcntr);
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
linear_offset = y * fb->pitches[0] + x * pixel_size;
intel_crtc->dspaddr_offset =
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
fb->bits_per_pixel / 8,
pixel_size,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
 
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += (intel_crtc->config.pipe_src_w - 1);
y += (intel_crtc->config.pipe_src_h - 1);
 
/* Finding the last pixel of the last line of the display
 * data and adding it to linear_offset */
linear_offset +=
(intel_crtc->config.pipe_src_h - 1) * fb->pitches[0] +
(intel_crtc->config.pipe_src_w - 1) * pixel_size;
}
}
 
I915_WRITE(reg, dspcntr);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
2592,6 → 2665,92
POSTING_READ(reg);
}
 
static void skylake_update_primary_plane(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int pipe = intel_crtc->pipe;
u32 plane_ctl, stride;
 
if (!intel_crtc->primary_enabled) {
I915_WRITE(PLANE_CTL(pipe, 0), 0);
I915_WRITE(PLANE_SURF(pipe, 0), 0);
POSTING_READ(PLANE_CTL(pipe, 0));
return;
}
 
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
 
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
case DRM_FORMAT_XRGB2101010:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
case DRM_FORMAT_XBGR2101010:
plane_ctl |= PLANE_CTL_ORDER_RGBX;
plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
break;
default:
BUG();
}
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
/*
* The stride is either expressed as a multiple of 64-byte chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
switch (obj->tiling_mode) {
case I915_TILING_NONE:
stride = fb->pitches[0] >> 6;
break;
case I915_TILING_X:
plane_ctl |= PLANE_CTL_TILED_X;
stride = fb->pitches[0] >> 9;
break;
default:
BUG();
}
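/*
 * Worked example (illustrative values, not part of this change): a linear
 * 1920x1080 XRGB8888 fb has pitches[0] = 1920 * 4 = 7680 bytes, giving
 * stride = 7680 >> 6 = 120 (64-byte units); the same fb X-tiled gives
 * stride = 7680 >> 9 = 15 (512-byte tile rows).
 */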
 
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
 
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
 
DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
i915_gem_obj_ggtt_offset(obj),
x, y, fb->width, fb->height,
fb->pitches[0]);
 
I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
I915_WRITE(PLANE_SIZE(pipe, 0),
(intel_crtc->config.pipe_src_h - 1) << 16 |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
 
POSTING_READ(PLANE_SURF(pipe, 0));
}
 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2602,7 → 2761,6
 
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
2610,25 → 2768,10
}
 
#if 0
void intel_display_handle_reset(struct drm_device *dev)
static void intel_complete_page_flips(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
/*
* Flips in the rings have been nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*
* Also update the base address of all primary
* planes to the last fb to make sure we're
* showing the correct fb after a reset.
*
* Need to make two loops over the crtcs so that we
* don't try to grab a crtc mutex before the
* pending_flip_queue really got woken up.
*/
 
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
2636,7 → 2779,13
intel_prepare_page_flip(dev, plane);
intel_finish_page_flip_plane(dev, plane);
}
}
 
static void intel_update_primary_planes(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
 
for_each_crtc(dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
2655,6 → 2804,79
}
}
 
void intel_prepare_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
 
/* no reset support for gen2 */
if (IS_GEN2(dev))
return;
 
/* reset doesn't touch the display */
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
return;
 
drm_modeset_lock_all(dev);
 
/*
* Disabling the crtcs gracefully seems nicer. Also the
* g33 docs say we should at least disable all the planes.
*/
for_each_intel_crtc(dev, crtc) {
if (crtc->active)
dev_priv->display.crtc_disable(&crtc->base);
}
}
 
void intel_finish_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
 
/*
* Flips in the rings will be nuked by the reset,
* so complete all pending flips so that user space
* will get its events and not get stuck.
*/
intel_complete_page_flips(dev);
 
/* no reset support for gen2 */
if (IS_GEN2(dev))
return;
 
/* reset doesn't touch the display */
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
/*
* Flips in the rings have been nuked by the reset,
* so update the base address of all primary
* planes to the last fb to make sure we're
* showing the correct fb after a reset.
*/
intel_update_primary_planes(dev);
return;
}
 
/*
* The display has been reset as well,
* so it needs a full re-initialization.
*/
intel_runtime_pm_disable_interrupts(dev_priv);
intel_runtime_pm_enable_interrupts(dev_priv);
 
intel_modeset_init_hw(dev);
 
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irq(&dev_priv->irq_lock);
 
intel_modeset_setup_hw_state(dev, true);
 
intel_hpd_init(dev_priv);
 
drm_modeset_unlock_all(dev);
}
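/*
 * Assumed call order around a GPU reset (a sketch only; the actual reset
 * path lives outside this file):
 *
 *	intel_prepare_reset(dev);
 *	... perform the GPU reset ...
 *	intel_finish_reset(dev);
 *
 * prepare runs before the hardware reset and disables the crtcs on
 * platforms where the reset clobbers the display; finish completes any
 * stuck page flips and, where needed, re-initializes the display.
 */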
 
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
2683,7 → 2905,6
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
 
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2690,14 → 2911,53
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
 
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock_irq(&dev->event_lock);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
spin_unlock_irq(&dev->event_lock);
 
return pending;
}
#endif
 
static void intel_update_pipe_size(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *adjusted_mode;
 
if (!i915.fastboot)
return;
 
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
* mode) to see if we can flip rather than do a full mode set. In the
* fastboot case, we'll flip, but if we don't update the pipesrc and
* pfit state, we'll end up with a big fb scanned out into the wrong
* sized surface.
*
* To fix this properly, we need to hoist the checks up into
* compute_mode_changes (or above), check the actual pfit state and
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
 
adjusted_mode = &crtc->config.adjusted_mode;
 
I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
if (!crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
}
crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
 
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *fb)
2707,7 → 2967,6
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = crtc->primary->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
 
2726,9 → 2985,9
}
 
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
i915_gem_track_fb(old_obj, intel_fb_obj(fb),
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
2736,37 → 2995,6
return ret;
}
 
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
* mode) to see if we can flip rather than do a full mode set. In the
* fastboot case, we'll flip, but if we don't update the pipesrc and
* pfit state, we'll end up with a big fb scanned out into the wrong
* sized surface.
*
* To fix this properly, we need to hoist the checks up into
* compute_mode_changes (or above), check the actual pfit state and
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/
if (i915.fastboot) {
const struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
 
I915_WRITE(PIPESRC(intel_crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1));
if (!intel_crtc->config.pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
}
intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
}
 
dev_priv->display.update_primary_plane(crtc, fb, x, y);
 
if (intel_crtc->active)
3363,19 → 3591,26
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (crtc->primary->fb == NULL)
return;
 
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
 
WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc),
60*HZ) == 0);
60*HZ) == 0)) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
WARN_ONCE(1, "Removing stuck page flip\n");
page_flip_completed(intel_crtc);
}
spin_unlock_irq(&dev->event_lock);
}
 
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->primary->fb);
mutex_unlock(&dev->struct_mutex);
}
}
#endif
 
/* Program iCLKIP clock to the desired frequency */
3593,9 → 3828,7
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
3655,12 → 3888,13
if (pll == NULL)
return;
 
if (pll->refcount == 0) {
WARN(1, "bad %s refcount\n", pll->name);
if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
WARN(1, "bad %s crtc mask\n", pll->name);
return;
}
 
if (--pll->refcount == 0) {
pll->config.crtc_mask &= ~(1 << crtc->pipe);
if (pll->config.crtc_mask == 0) {
WARN_ON(pll->on);
WARN_ON(pll->active);
}
3671,15 → 3905,9
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
if (pll) {
DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
crtc->base.base.id, pll->name);
intel_put_shared_dpll(crtc);
}
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
3688,7 → 3916,7
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
crtc->base.base.id, pll->name);
 
WARN_ON(pll->refcount);
WARN_ON(pll->new_config->crtc_mask);
 
goto found;
}
3697,15 → 3925,16
pll = &dev_priv->shared_dplls[i];
 
/* Only want to check enabled timings first */
if (pll->refcount == 0)
if (pll->new_config->crtc_mask == 0)
continue;
 
if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
 
if (memcmp(&crtc->new_config->dpll_hw_state,
&pll->new_config->hw_state,
sizeof(pll->new_config->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
crtc->base.base.id, pll->name,
pll->new_config->crtc_mask,
pll->active);
goto found;
}
}
3713,7 → 3942,7
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
if (pll->refcount == 0) {
if (pll->new_config->crtc_mask == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
crtc->base.base.id, pll->name);
goto found;
3723,18 → 3952,86
return NULL;
 
found:
if (pll->refcount == 0)
pll->hw_state = crtc->config.dpll_hw_state;
if (pll->new_config->crtc_mask == 0)
pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
 
crtc->config.shared_dpll = i;
crtc->new_config->shared_dpll = i;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
 
pll->refcount++;
pll->new_config->crtc_mask |= 1 << crtc->pipe;
 
return pll;
}
 
/**
* intel_shared_dpll_start_config - start a new PLL staged config
* @dev_priv: i915 private structure
* @clear_pipes: mask of pipes that will have their PLLs freed
*
* Starts a new PLL staged config, copying the current config but
* releasing the references of pipes specified in clear_pipes.
*/
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
unsigned clear_pipes)
{
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
 
pll->new_config = kmemdup(&pll->config, sizeof pll->config,
GFP_KERNEL);
if (!pll->new_config)
goto cleanup;
 
pll->new_config->crtc_mask &= ~clear_pipes;
}
 
return 0;
 
cleanup:
while (--i >= 0) {
pll = &dev_priv->shared_dplls[i];
kfree(pll->new_config);
pll->new_config = NULL;
}
 
return -ENOMEM;
}
 
static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
{
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
 
WARN_ON(pll->new_config == &pll->config);
 
pll->config = *pll->new_config;
kfree(pll->new_config);
pll->new_config = NULL;
}
}
 
static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
{
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
 
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
pll = &dev_priv->shared_dplls[i];
 
WARN_ON(pll->new_config == &pll->config);
 
kfree(pll->new_config);
pll->new_config = NULL;
}
}
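/*
 * Illustrative caller pattern for the staged shared-DPLL config helpers
 * above (a sketch only; the surrounding modeset code is an assumption,
 * not part of this change):
 *
 *	if (intel_shared_dpll_start_config(dev_priv, clear_pipes))
 *		return -ENOMEM;
 *	... compute the new state, e.g. via intel_get_shared_dpll(), which
 *	    updates pll->new_config->crtc_mask and hw_state ...
 *	if (failed)
 *		intel_shared_dpll_abort_config(dev_priv);
 *	else
 *		intel_shared_dpll_commit(dev_priv);
 */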
 
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3749,6 → 4046,19
}
}
 
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), PS_ENABLE);
I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
}
}
 
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
3872,7 → 4182,7
return;
 
if (!HAS_PCH_SPLIT(dev_priv->dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
assert_dsi_pll_enabled(dev_priv);
else
assert_pll_enabled(dev_priv, pipe);
3911,6 → 4221,7
 
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
// (void) intel_overlay_switch_off(intel_crtc->overlay);
dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
3923,14 → 4234,10
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
drm_vblank_on(dev, pipe);
 
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
3940,6 → 4247,13
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
* to compute the mask of flip planes precisely. For the time being
* consider this a flip from a NULL plane.
*/
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
 
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
3959,8 → 4273,14
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
drm_vblank_off(dev, pipe);
intel_disable_primary_hw_plane(crtc->primary, crtc);
 
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
* to compute the mask of flip planes precisely. For the time being
* consider this a flip to a NULL plane.
*/
// intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
 
static void ironlake_crtc_enable(struct drm_crtc *crtc)
3970,7 → 4290,6
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
enum plane plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
3987,22 → 4306,15
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
&intel_crtc->config.fdi_m_n, NULL);
}
 
ironlake_set_pipeconf(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
4038,6 → 4350,9
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
}
 
4083,7 → 4398,6
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
enum plane plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
4098,9 → 4412,14
 
intel_set_pipe_timings(intel_crtc);
 
if (intel_crtc->config.cpu_transcoder != TRANSCODER_EDP) {
I915_WRITE(PIPE_MULT(intel_crtc->config.cpu_transcoder),
intel_crtc->config.pixel_multiplier - 1);
}
 
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
&intel_crtc->config.fdi_m_n);
&intel_crtc->config.fdi_m_n, NULL);
}
 
haswell_set_pipeconf(crtc);
4107,27 → 4426,24
 
intel_set_pipe_csc(crtc);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
if (intel_crtc->config.has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
true);
dev_priv->display.fdi_link_train(crtc);
}
 
intel_ddi_enable_pipe_clock(intel_crtc);
 
if (IS_SKYLAKE(dev))
skylake_pfit_enable(intel_crtc);
else
ironlake_pfit_enable(intel_crtc);
 
/*
4153,6 → 4469,9
intel_opregion_notify_encoder(encoder, true);
}
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
4159,6 → 4478,21
intel_crtc_enable_planes(crtc);
}
 
static void skylake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
 
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (crtc->config.pch_pfit.enabled) {
I915_WRITE(PS_CTL(pipe), 0);
I915_WRITE(PS_WIN_POS(pipe), 0);
I915_WRITE(PS_WIN_SZ(pipe), 0);
}
}
 
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
4188,13 → 4522,17
 
intel_crtc_disable_planes(crtc);
 
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_disable_pipe(dev_priv, pipe);
intel_disable_pipe(intel_crtc);
 
ironlake_pfit_disable(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
4205,7 → 4543,6
ironlake_fdi_disable(crtc);
 
ironlake_disable_pch_transcoder(dev_priv, pipe);
intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
4242,7 → 4579,6
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
if (!intel_crtc->active)
4250,6 → 4586,9
 
intel_crtc_disable_planes(crtc);
 
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
4256,8 → 4595,9
}
 
if (intel_crtc->config.has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
intel_disable_pipe(dev_priv, pipe);
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
intel_disable_pipe(intel_crtc);
 
if (intel_crtc->config.dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
4264,6 → 4604,9
 
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
if (IS_SKYLAKE(dev))
skylake_pfit_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc);
 
intel_ddi_disable_pipe_clock(intel_crtc);
4270,7 → 4613,6
 
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
 
4391,20 → 4733,6
return mask;
}
 
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
bool enable)
{
if (dev_priv->power_domains.init_power_on == enable)
return;
 
if (enable)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
dev_priv->power_domains.init_power_on = enable;
}
 
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4427,6 → 4755,9
intel_display_power_get(dev_priv, domain);
}
 
if (dev_priv->display.modeset_global_resources)
dev_priv->display.modeset_global_resources(dev);
 
for_each_intel_crtc(dev, crtc) {
enum intel_display_power_domain domain;
 
4458,7 → 4789,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
dev_priv->vlv_cdclk_freq);
 
/*
4466,7 → 4797,7
* BSpec erroneously claims we should aim for 4MHz, but
* in fact 1MHz is the correct frequency.
*/
I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
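/* e.g. a 320000 kHz CDclk programs DIV_ROUND_UP(320000, 1000) = 320,
 * i.e. the CDclk frequency expressed in MHz (illustrative value).
 */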
}
 
/* Adjust CDclk dividers to allow high res or save power if possible */
4497,10 → 4828,9
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (cdclk == 400000) {
u32 divider, vco;
u32 divider;
 
vco = valleyview_get_vco(dev_priv);
divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
 
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
4535,12 → 4865,56
vlv_update_cdclk(dev);
}
 
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
 
WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
 
switch (cdclk) {
case 400000:
cmd = 3;
break;
case 333333:
case 320000:
cmd = 2;
break;
case 266667:
cmd = 1;
break;
case 200000:
cmd = 0;
break;
default:
WARN_ON(1);
return;
}
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
}
mutex_unlock(&dev_priv->rps.hw_lock);
 
vlv_update_cdclk(dev);
}
 
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int vco = valleyview_get_vco(dev_priv);
int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
 
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev_priv->dev))
return 400000;
 
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
4603,21 → 4977,35
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
 
if (req_cdclk != dev_priv->vlv_cdclk_freq)
if (req_cdclk != dev_priv->vlv_cdclk_freq) {
/*
* FIXME: We can end up here with all power domains off, yet
* with a CDCLK frequency other than the minimum. To account
* for this take the PIPE-A power domain, which covers the HW
* blocks needed for the following programming. This can be
* removed once it's guaranteed that we get here either with
* the minimum CDCLK set, or the required power domains
* enabled.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 
if (IS_CHERRYVIEW(dev))
cherryview_set_cdclk(dev, req_cdclk);
else
valleyview_set_cdclk(dev, req_cdclk);
modeset_update_crtc_power_domains(dev);
 
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
}
 
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_dsi;
u32 dspcntr;
 
WARN_ON(!crtc->enabled);
 
4624,38 → 5012,32
if (intel_crtc->active)
return;
 
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
 
if (!is_dsi && !IS_CHERRYVIEW(dev))
vlv_prepare_pll(intel_crtc);
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_prepare_pll(intel_crtc, &intel_crtc->config);
else
vlv_prepare_pll(intel_crtc, &intel_crtc->config);
}
 
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
 
i9xx_set_pipeconf(intel_crtc);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
4663,9 → 5045,9
 
if (!is_dsi) {
if (IS_CHERRYVIEW(dev))
chv_enable_pll(intel_crtc);
chv_enable_pll(intel_crtc, &intel_crtc->config);
else
vlv_enable_pll(intel_crtc);
vlv_enable_pll(intel_crtc, &intel_crtc->config);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
4682,10 → 5064,13
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
i9xx_check_fifo_underruns(dev_priv);
}
 
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4700,12 → 5085,10
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 dspcntr;
 
WARN_ON(!crtc->enabled);
 
4714,39 → 5097,17
 
i9xx_set_pll_dividers(intel_crtc);
 
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
 
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
 
intel_set_pipe_timings(intel_crtc);
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((intel_crtc->config.pipe_src_h - 1) << 16) |
(intel_crtc->config.pipe_src_w - 1));
I915_WRITE(DSPPOS(plane), 0);
 
i9xx_set_pipeconf(intel_crtc);
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
 
dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
crtc->x, crtc->y);
 
intel_crtc->active = true;
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
4764,6 → 5125,9
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
 
intel_crtc_enable_planes(crtc);
 
/*
4774,10 → 5138,10
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
i9xx_check_fifo_underruns(dev_priv);
}
 
static void i9xx_pfit_disable(struct intel_crtc *crtc)
4813,7 → 5177,7
* but leave the pipe running.
*/
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
/*
* Vblank time updates from the shadow to live plane control register
4827,9 → 5191,6
intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
4838,8 → 5199,14
*/
intel_wait_for_vblank(dev, pipe);
 
intel_disable_pipe(dev_priv, pipe);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
intel_disable_pipe(intel_crtc);
 
i9xx_pfit_disable(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
4846,17 → 5213,17
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) {
if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_disable_pll(dev_priv, pipe);
else
i9xx_disable_pll(dev_priv, pipe);
i9xx_disable_pll(intel_crtc);
}
 
if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
 
intel_crtc->active = false;
intel_update_watermarks(crtc);
4870,39 → 5237,6
{
}
 
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
bool enabled)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
 
 
#if 0
if (!dev->primary->master)
return;
 
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
 
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
break;
case 1:
master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
#endif
}
 
/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
4946,8 → 5280,6
enable |= intel_encoder->connectors_active;
 
intel_crtc_control(crtc, enable);
 
intel_crtc_update_sarea(crtc, enable);
}
 
static void intel_crtc_disable(struct drm_crtc *crtc)
4962,7 → 5294,6
WARN_ON(!crtc->enabled);
 
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
 
if (crtc->primary->fb) {
5201,11 → 5532,11
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
struct drm_i915_private *dev_priv = dev->dev_private;
int clock_limit =
dev_priv->display.get_display_clock_speed(dev);
 
5232,7 → 5563,7
* - LVDS dual channel mode
* - Double wide pipe
*/
if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
pipe_config->pipe_src_w &= ~1;
 
5254,13 → 5585,6
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
 
/*
* XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
* old clock survives for now.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
 
if (pipe_config->has_pch_encoder)
return ironlake_fdi_compute_config(crtc, pipe_config);
 
5270,10 → 5594,16
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int vco = valleyview_get_vco(dev_priv);
u32 val;
int divider;
 
/* FIXME: Punit isn't quite ready yet */
if (IS_CHERRYVIEW(dev))
return 400000;
 
if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
 
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
5284,7 → 5614,7
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");
 
return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
 
static int i945_get_display_clock_speed(struct drm_device *dev)
5416,15 → 5746,15
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
 
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
 
if (IS_VALLEYVIEW(dev)) {
refclk = 100000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->vbt.lvds_ssc_freq;
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
5454,24 → 5784,24
u32 fp, fp2 = 0;
 
if (IS_PINEVIEW(dev)) {
fp = pnv_dpll_compute_fp(&crtc->config.dpll);
fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (reduced_clock)
fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
 
crtc->config.dpll_hw_state.fp0 = fp;
crtc->new_config->dpll_hw_state.fp0 = fp;
 
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915.powersave) {
crtc->config.dpll_hw_state.fp1 = fp2;
crtc->new_config->dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
crtc->config.dpll_hw_state.fp1 = fp;
crtc->new_config->dpll_hw_state.fp1 = fp;
}
}
 
5518,7 → 5848,8
}
 
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n)
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
5530,6 → 5861,18
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
/* M2_N2 registers exist only on gen < 8 and are programmed only when
 * DRRS is supported, so that the registers are not accessed
 * unnecessarily.
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
crtc->config.has_drrs) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
}
} else {
I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
5538,15 → 5881,17
}
}
 
static void intel_dp_set_m_n(struct intel_crtc *crtc)
void intel_dp_set_m_n(struct intel_crtc *crtc)
{
if (crtc->config.has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
else
intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
&crtc->config.dp_m2_n2);
}
 
static void vlv_update_pll(struct intel_crtc *crtc)
static void vlv_update_pll(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
u32 dpll, dpll_md;
 
5561,14 → 5906,15
if (crtc->pipe == PIPE_B)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
pipe_config->dpll_hw_state.dpll = dpll;
 
dpll_md = (crtc->config.pixel_multiplier - 1)
dpll_md = (pipe_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
pipe_config->dpll_hw_state.dpll_md = dpll_md;
}
 
static void vlv_prepare_pll(struct intel_crtc *crtc)
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
5579,11 → 5925,11
 
mutex_lock(&dev_priv->dpio_lock);
 
bestn = crtc->config.dpll.n;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2;
bestp1 = crtc->config.dpll.p1;
bestp2 = crtc->config.dpll.p2;
bestn = pipe_config->dpll.n;
bestm1 = pipe_config->dpll.m1;
bestm2 = pipe_config->dpll.m2;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
 
/* See eDP HDMI DPIO driver vbios notes doc */
 
5620,9 → 5966,9
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
 
/* Set HBR and RBR LPF coefficients */
if (crtc->config.port_clock == 162000 ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
if (pipe_config->port_clock == 162000 ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
5629,8 → 5975,7
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
if (crtc->config.has_dp_encoder) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
5650,8 → 5995,8
 
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
 
5659,8 → 6004,22
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void chv_update_pll(struct intel_crtc *crtc)
static void chv_update_pll(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
 
pipe_config->dpll_hw_state.dpll_md =
(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
 
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
5670,27 → 6029,18
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
 
crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
if (pipe != PIPE_A)
crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
bestm1 = pipe_config->dpll.m1;
bestm2 = pipe_config->dpll.m2 >> 22;
bestp1 = pipe_config->dpll.p1;
bestp2 = pipe_config->dpll.p2;
 
crtc->config.dpll_hw_state.dpll_md =
(crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 
bestn = crtc->config.dpll.n;
bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2 >> 22;
bestp1 = crtc->config.dpll.p1;
bestp2 = crtc->config.dpll.p2;
 
/*
* Enable Refclk and SSC
*/
I915_WRITE(dpll_reg,
crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
 
mutex_lock(&dev_priv->dpio_lock);
 
5718,7 → 6068,7
(2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
 
/* Loop filter */
refclk = i9xx_get_refclk(&crtc->base, 0);
refclk = i9xx_get_refclk(crtc, 0);
loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
2 << DPIO_CHV_GAIN_CTRL_SHIFT;
if (refclk == 100000)
5738,6 → 6088,53
mutex_unlock(&dev_priv->dpio_lock);
}
 
/**
* vlv_force_pll_on - forcibly enable just the PLL
* @dev: drm device
* @pipe: pipe PLL to enable
* @dpll: PLL configuration
*
* Enable the PLL for @pipe using the supplied @dpll config. To be used
* in cases where we need the PLL enabled even when @pipe is not going to
* be enabled.
*/
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll)
{
struct intel_crtc *crtc =
to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
struct intel_crtc_config pipe_config = {
.pixel_multiplier = 1,
.dpll = *dpll,
};
 
if (IS_CHERRYVIEW(dev)) {
chv_update_pll(crtc, &pipe_config);
chv_prepare_pll(crtc, &pipe_config);
chv_enable_pll(crtc, &pipe_config);
} else {
vlv_update_pll(crtc, &pipe_config);
vlv_prepare_pll(crtc, &pipe_config);
vlv_enable_pll(crtc, &pipe_config);
}
}
 
/**
* vlv_force_pll_off - forcibly disable just the PLL
* @dev: drm device
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe, undoing a previous vlv_force_pll_on(). To be
* used in cases where the PLL was forced on while @pipe itself was not
* going to be enabled.
*/
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
{
if (IS_CHERRYVIEW(dev))
chv_disable_pll(to_i915(dev), pipe);
else
vlv_disable_pll(to_i915(dev), pipe);
}
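 
/*
 * Illustrative pairing of the two force-PLL helpers above (a sketch only;
 * the divider values below are placeholders, not taken from any platform
 * table):
 *
 * struct dpll pll = { .n = 1, .m1 = 2, .m2 = 0x1d, .p1 = 3, .p2 = 2 };
 *
 * vlv_force_pll_on(dev, PIPE_B, &pll);
 * ... touch hardware that needs the PLL running while the pipe is off ...
 * vlv_force_pll_off(dev, PIPE_B);
 */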
 
static void i9xx_update_pll(struct intel_crtc *crtc,
intel_clock_t *reduced_clock,
int num_connectors)
5746,22 → 6143,22
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
struct dpll *clock = &crtc->new_config->dpll;
 
i9xx_update_pll_dividers(crtc, reduced_clock);
 
is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
dpll |= (crtc->config.pixel_multiplier - 1)
dpll |= (crtc->new_config->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
 
5768,7 → 6165,7
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
if (crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
5796,9 → 6193,9
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
if (crtc->config.sdvo_tv_clock)
if (crtc->new_config->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
5805,12 → 6202,12
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
crtc->new_config->dpll_hw_state.dpll = dpll;
 
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
crtc->config.dpll_hw_state.dpll_md = dpll_md;
crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
}
}
 
5821,13 → 6218,13
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
struct dpll *clock = &crtc->new_config->dpll;
 
i9xx_update_pll_dividers(crtc, reduced_clock);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock->p1 == 2)
5838,10 → 6235,10
dpll |= PLL_P2_DIVIDE_BY_4;
}
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
 
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
5848,7 → 6245,7
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
crtc->config.dpll_hw_state.dpll = dpll;
crtc->new_config->dpll_hw_state.dpll = dpll;
}
 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5872,7 → 6269,7
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
 
if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
5989,9 → 6386,9
 
pipeconf = 0;
 
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
pipeconf |= PIPECONF_ENABLE;
if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
 
if (intel_crtc->config.double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
6030,7 → 6427,7
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_INFO(dev)->gen < 4 ||
intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
6044,13 → 6441,10
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
 
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
bool ok, has_reduced_clock = false;
6058,7 → 6452,10
struct intel_encoder *encoder;
const intel_limit_t *limit;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
6066,6 → 6463,8
case INTEL_OUTPUT_DSI:
is_dsi = true;
break;
default:
break;
}
 
num_connectors++;
6074,7 → 6473,7
if (is_dsi)
return 0;
 
if (!intel_crtc->config.clock_set) {
if (!crtc->new_config->clock_set) {
refclk = i9xx_get_refclk(crtc, num_connectors);
 
/*
6085,7 → 6484,7
*/
limit = intel_limit(crtc, refclk);
ok = dev_priv->display.find_dpll(limit, crtc,
intel_crtc->config.port_clock,
crtc->new_config->port_clock,
refclk, NULL, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
6106,23 → 6505,23
&reduced_clock);
}
/* Compat-code for transition, will disappear. */
intel_crtc->config.dpll.n = clock.n;
intel_crtc->config.dpll.m1 = clock.m1;
intel_crtc->config.dpll.m2 = clock.m2;
intel_crtc->config.dpll.p1 = clock.p1;
intel_crtc->config.dpll.p2 = clock.p2;
crtc->new_config->dpll.n = clock.n;
crtc->new_config->dpll.m1 = clock.m1;
crtc->new_config->dpll.m2 = clock.m2;
crtc->new_config->dpll.p1 = clock.p1;
crtc->new_config->dpll.p2 = clock.p2;
}
 
if (IS_GEN2(dev)) {
i8xx_update_pll(intel_crtc,
i8xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
} else if (IS_CHERRYVIEW(dev)) {
chv_update_pll(intel_crtc);
chv_update_pll(crtc, crtc->new_config);
} else if (IS_VALLEYVIEW(dev)) {
vlv_update_pll(intel_crtc);
vlv_update_pll(crtc, crtc->new_config);
} else {
i9xx_update_pll(intel_crtc,
i9xx_update_pll(crtc,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
}
6234,7 → 6633,7
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
6288,7 → 6687,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
6344,6 → 6743,14
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev)) {
/*
* DPLL_DVO_2X_MODE must be enabled for both DPLLs
* on 830. Filter it out here so that we don't
* report errors due to that.
*/
if (IS_I830(dev))
pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
 
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
6366,7 → 6773,6
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
u32 val, final;
bool has_lvds = false;
6376,8 → 6782,7
bool can_ssc = false;
 
/* We need to take the global config into account */
list_for_each_entry(encoder, &mode_config->encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
6388,6 → 6793,8
if (enc_to_dig_port(&encoder->base)->port == PORT_A)
has_cpu_edp = true;
break;
default:
break;
}
}
 
6684,15 → 7091,16
 
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
default:
break;
}
}
 
6721,11 → 7129,16
int num_connectors = 0;
bool is_lvds = false;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != to_intel_crtc(crtc))
continue;
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
default:
break;
}
num_connectors++;
}
6870,7 → 7283,7
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
 
if (IS_BROADWELL(dev)) {
if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
val = 0;
 
switch (intel_crtc->config.pipe_bpp) {
6905,18 → 7318,12
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int refclk;
const intel_limit_t *limit;
bool ret, is_lvds = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
}
}
is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
 
refclk = ironlake_get_refclk(crtc);
 
6925,9 → 7332,9
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ret = dev_priv->display.find_dpll(limit, crtc,
to_intel_crtc(crtc)->config.port_clock,
limit = intel_limit(intel_crtc, refclk);
ret = dev_priv->display.find_dpll(limit, intel_crtc,
intel_crtc->new_config->port_clock,
refclk, NULL, clock);
if (!ret)
return false;
6940,7 → 7347,7
* downclock feature.
*/
*has_reduced_clock =
dev_priv->display.find_dpll(limit, crtc,
dev_priv->display.find_dpll(limit, intel_crtc,
dev_priv->lvds_downclock,
refclk, clock,
reduced_clock);
6977,7 → 7384,10
int factor, num_connectors = 0;
bool is_lvds = false, is_sdvo = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
for_each_intel_encoder(dev, intel_encoder) {
if (intel_encoder->new_crtc != to_intel_crtc(crtc))
continue;
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
6986,6 → 7396,8
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
break;
default:
break;
}
 
num_connectors++;
6998,10 → 7410,10
dev_priv->vbt.lvds_ssc_freq == 100000) ||
(HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
factor = 25;
} else if (intel_crtc->config.sdvo_tv_clock)
} else if (intel_crtc->new_config->sdvo_tv_clock)
factor = 20;
 
if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
*fp |= FP_CB_TUNE;
 
if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
7014,20 → 7426,20
else
dpll |= DPLLB_MODE_DAC_SERIAL;
 
dpll |= (intel_crtc->config.pixel_multiplier - 1)
dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 
if (is_sdvo)
dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
if (intel_crtc->new_config->has_dp_encoder)
dpll |= DPLL_SDVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
switch (intel_crtc->config.dpll.p2) {
switch (intel_crtc->new_config->dpll.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
7050,78 → 7462,64
return dpll | DPLL_VCO_ENABLE;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int num_connectors = 0;
struct drm_device *dev = crtc->base.dev;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false;
struct intel_encoder *encoder;
struct intel_shared_dpll *pll;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
}
is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
 
num_connectors++;
}
 
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
ok = ironlake_compute_clocks(crtc, &clock,
ok = ironlake_compute_clocks(&crtc->base, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok && !intel_crtc->config.clock_set) {
if (!ok && !crtc->new_config->clock_set) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Compat-code for transition, will disappear. */
if (!intel_crtc->config.clock_set) {
intel_crtc->config.dpll.n = clock.n;
intel_crtc->config.dpll.m1 = clock.m1;
intel_crtc->config.dpll.m2 = clock.m2;
intel_crtc->config.dpll.p1 = clock.p1;
intel_crtc->config.dpll.p2 = clock.p2;
if (!crtc->new_config->clock_set) {
crtc->new_config->dpll.n = clock.n;
crtc->new_config->dpll.m1 = clock.m1;
crtc->new_config->dpll.m2 = clock.m2;
crtc->new_config->dpll.p1 = clock.p1;
crtc->new_config->dpll.p2 = clock.p2;
}
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (intel_crtc->config.has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
if (crtc->new_config->has_pch_encoder) {
fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
if (has_reduced_clock)
fp2 = i9xx_dpll_compute_fp(&reduced_clock);
 
dpll = ironlake_compute_dpll(intel_crtc,
dpll = ironlake_compute_dpll(crtc,
&fp, &reduced_clock,
has_reduced_clock ? &fp2 : NULL);
 
intel_crtc->config.dpll_hw_state.dpll = dpll;
intel_crtc->config.dpll_hw_state.fp0 = fp;
crtc->new_config->dpll_hw_state.dpll = dpll;
crtc->new_config->dpll_hw_state.fp0 = fp;
if (has_reduced_clock)
intel_crtc->config.dpll_hw_state.fp1 = fp2;
crtc->new_config->dpll_hw_state.fp1 = fp2;
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
crtc->new_config->dpll_hw_state.fp1 = fp;
 
pll = intel_get_shared_dpll(intel_crtc);
pll = intel_get_shared_dpll(crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
pipe_name(crtc->pipe));
return -EINVAL;
}
} else
intel_put_shared_dpll(intel_crtc);
}
 
if (is_lvds && has_reduced_clock && i915.powersave)
intel_crtc->lowfreq_avail = true;
crtc->lowfreq_avail = true;
else
intel_crtc->lowfreq_avail = false;
crtc->lowfreq_avail = false;
 
return 0;
}
7144,7 → 7542,8
 
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum transcoder transcoder,
struct intel_link_m_n *m_n)
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
7158,6 → 7557,20
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
/* Read the M2_N2 registers only on gen < 8 (the only generations with
* separate M2_N2 registers) and only if DRRS is supported, so that the
* registers are not read unnecessarily.
*/
if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
crtc->config.has_drrs) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
& ~TU_SIZE_MASK;
m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
} else {
m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
7176,7 → 7589,8
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->dp_m_n);
&pipe_config->dp_m_n,
&pipe_config->dp_m2_n2);
}
 
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7183,9 → 7597,25
struct intel_crtc_config *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n);
&pipe_config->fdi_m_n, NULL);
}
 
static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(PS_CTL(crtc->pipe));
 
if (tmp & PS_ENABLE) {
pipe_config->pch_pfit.enabled = true;
pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
}
}
 
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
7254,12 → 7684,13
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
 
val = I915_READ(DSPSTRIDE(pipe));
crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
 
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
 
plane_config->size = 16*1024*1024;
plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
aligned_height);
 
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
7276,7 → 7707,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
7470,7 → 7901,6
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
uint32_t val;
unsigned long irqflags;
 
val = I915_READ(LCPLL_CTL);
 
7490,10 → 7920,10
* to call special forcewake code that doesn't touch runtime PM and
* doesn't enable the forcewake delayed work.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
spin_lock_irq(&dev_priv->uncore.lock);
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
spin_unlock_irq(&dev_priv->uncore.lock);
 
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
7524,10 → 7954,10
}
 
/* See the big comment above. */
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
spin_lock_irq(&dev_priv->uncore.lock);
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
spin_unlock_irq(&dev_priv->uncore.lock);
}
 
/*
7589,29 → 8019,53
intel_prepare_ddi(dev);
}
 
static void snb_modeset_global_resources(struct drm_device *dev)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
{
modeset_update_crtc_power_domains(dev);
if (!intel_ddi_pll_select(crtc))
return -EINVAL;
 
crtc->lowfreq_avail = false;
 
return 0;
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
{
modeset_update_crtc_power_domains(dev);
u32 temp;
 
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
 
switch (pipe_config->ddi_pll_sel) {
case SKL_DPLL1:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
break;
case SKL_DPLL2:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
break;
case SKL_DPLL3:
pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
break;
}
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *fb)
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_config *pipe_config)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
 
intel_crtc->lowfreq_avail = false;
 
return 0;
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
pipe_config->shared_dpll = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
pipe_config->shared_dpll = DPLL_ID_WRPLL2;
break;
}
}
 
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
7626,17 → 8080,11
 
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
 
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
if (IS_SKYLAKE(dev))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
 
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
pipe_config->shared_dpll = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
pipe_config->shared_dpll = DPLL_ID_WRPLL2;
break;
}
 
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
 
7649,7 → 8097,8
* DDI E. So just check whether this pipe is wired to DDI E and whether
* the PCH transcoder is on.
*/
if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
if (INTEL_INFO(dev)->gen < 9 &&
(port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
 
tmp = I915_READ(FDI_RX_CTL(PIPE_A));
7668,7 → 8117,7
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
 
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(crtc->pipe)))
return false;
 
7697,7 → 8146,7
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
 
if (!intel_display_power_enabled(dev_priv,
if (!intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
 
7710,353 → 8159,82
intel_get_pipe_timings(crtc, pipe_config);
 
pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_enabled(dev_priv, pfit_domain))
if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
if (IS_SKYLAKE(dev))
skylake_get_pfit_config(crtc, pipe_config);
else
ironlake_get_pfit_config(crtc, pipe_config);
}
 
if (IS_HASWELL(dev))
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
 
if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
pipe_config->pixel_multiplier =
I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
 
return true;
}
 
static struct {
int clock;
u32 config;
} hdmi_audio_clock[] = {
{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};
 
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
int i;
 
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
if (mode->clock == hdmi_audio_clock[i].clock)
break;
}
 
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
i = 1;
}
 
DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
hdmi_audio_clock[i].clock,
hdmi_audio_clock[i].config);
 
return hdmi_audio_clock[i].config;
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
int reg_edid)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t i;
 
i = I915_READ(reg_eldv);
i &= bits_eldv;
 
if (!eld[0])
return !i;
 
if (!i)
return false;
 
i = I915_READ(reg_elda);
i &= ~bits_elda;
I915_WRITE(reg_elda, i);
 
for (i = 0; i < eld[2]; i++)
if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
return false;
 
return true;
}
 
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc,
struct drm_display_mode *mode)
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t len;
uint32_t i;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
 
i = I915_READ(G4X_AUD_VID_DID);
if (base) {
unsigned int width = intel_crtc->cursor_width;
unsigned int height = intel_crtc->cursor_height;
unsigned int stride = roundup_pow_of_two(width) * 4;
 
if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
 
if (intel_eld_uptodate(connector,
G4X_AUD_CNTL_ST, eldv,
G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
G4X_HDMIW_HDMIEDID))
return;
 
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
I915_WRITE(G4X_AUD_CNTL_ST, i);
 
if (!eld[0])
return;
 
len = min_t(uint8_t, eld[2], len);
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
 
i = I915_READ(G4X_AUD_CNTL_ST);
i |= eldv;
I915_WRITE(G4X_AUD_CNTL_ST, i);
switch (stride) {
default:
WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
width, stride);
stride = 256;
/* fallthrough */
case 256:
case 512:
case 1024:
case 2048:
break;
}
 
static void haswell_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t i;
int len;
int pipe = to_intel_crtc(crtc)->pipe;
int tmp;
cntl |= CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB |
CURSOR_STRIDE(stride);
 
int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
int aud_config = HSW_AUD_CFG(pipe);
int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
 
/* Audio output enable */
DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
tmp = I915_READ(aud_cntrl_st2);
tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
POSTING_READ(aud_cntrl_st2);
 
assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
 
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
 
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
/* clear N_programming_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else {
I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
size = (height << 12) | width;
}
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
aud_cntl_st, IBX_ELD_ADDRESS,
hdmiw_hdmiedid))
return;
 
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
 
if (!eld[0])
return;
 
i = I915_READ(aud_cntl_st);
i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
DRM_DEBUG_DRIVER("port num:%d\n", i);
 
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 
i = I915_READ(aud_cntrl_st2);
i |= eldv;
I915_WRITE(aud_cntrl_st2, i);
 
}
 
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
uint32_t eldv;
uint32_t i;
int len;
int hdmiw_hdmiedid;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
int pipe = to_intel_crtc(crtc)->pipe;
 
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(connector->dev)) {
hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
aud_config = VLV_AUD_CFG(pipe);
aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
aud_config = CPT_AUD_CFG(pipe);
aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
if (IS_VALLEYVIEW(connector->dev)) {
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
 
intel_encoder = intel_attached_encoder(connector);
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
i = intel_dig_port->port;
} else {
i = I915_READ(aud_cntl_st);
i = (i >> 29) & DIP_PORT_SEL_MASK;
/* DIP_Port_Select, 0x1 = PortB */
}
 
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
eldv = IBX_ELD_VALIDB;
eldv |= IBX_ELD_VALIDB << 4;
eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else {
I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
}
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
aud_cntl_st, IBX_ELD_ADDRESS,
hdmiw_hdmiedid))
return;
 
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
 
if (!eld[0])
return;
 
i = I915_READ(aud_cntl_st);
i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
 
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 
i = I915_READ(aud_cntrl_st2);
i |= eldv;
I915_WRITE(aud_cntrl_st2, i);
}
 
void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_crtc *crtc = encoder->crtc;
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
connector = drm_select_eld(encoder, mode);
if (!connector)
return;
 
DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id,
connector->name,
connector->encoder->base.id,
connector->encoder->name);
 
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
if (dev_priv->display.write_eld)
dev_priv->display.write_eld(connector, crtc, mode);
}
 
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl;
 
if (base != intel_crtc->cursor_base) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
if (intel_crtc->cursor_cntl != 0 &&
(intel_crtc->cursor_base != base ||
intel_crtc->cursor_size != size ||
intel_crtc->cursor_cntl != cntl)) {
/* On these chipsets we can only modify the base/size/stride
* whilst the cursor is disabled.
*/
if (intel_crtc->cursor_cntl) {
I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
intel_crtc->cursor_cntl = 0;
}
 
if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
POSTING_READ(_CURABASE);
intel_crtc->cursor_base = base;
}
 
/* XXX width must be 64, stride 256 => 0x00 << 28 */
cntl = 0;
if (base)
cntl = (CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB);
if (intel_crtc->cursor_size != size) {
I915_WRITE(CURSIZE, size);
intel_crtc->cursor_size = size;
}
 
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(_CURACNTR, cntl);
POSTING_READ(_CURACNTR);
8090,47 → 8268,14
return;
}
cntl |= pipe << 28; /* Connect to correct pipe */
}
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
POSTING_READ(CURCNTR(pipe));
intel_crtc->cursor_cntl = cntl;
}
 
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
}
 
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t cntl;
if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
cntl |= CURSOR_ROTATE_180;
 
cntl = 0;
if (base) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->cursor_width) {
case 64:
cntl |= CURSOR_MODE_64_ARGB_AX;
break;
case 128:
cntl |= CURSOR_MODE_128_ARGB_AX;
break;
case 256:
cntl |= CURSOR_MODE_256_ARGB_AX;
break;
default:
WARN_ON(1);
return;
}
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
 
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(CURCNTR(pipe), cntl);
POSTING_READ(CURCNTR(pipe));
8140,6 → 8285,8
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
POSTING_READ(CURBASE(pipe));
 
intel_crtc->cursor_base = base;
}
 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
8186,28 → 8333,62
 
I915_WRITE(CURPOS(pipe), pos);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
ivb_update_cursor(crtc, base);
else if (IS_845G(dev) || IS_I865G(dev))
/* ILK+ do this automagically */
if (HAS_GMCH_DISPLAY(dev) &&
to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
base += (intel_crtc->cursor_height *
intel_crtc->cursor_width - 1) * 4;
}
 
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
intel_crtc->cursor_base = base;
}
 
static bool cursor_size_ok(struct drm_device *dev,
uint32_t width, uint32_t height)
{
if (width == 0 || height == 0)
return false;
 
/*
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
*
* Note that the object's reference will be consumed if the update fails. If
* the update succeeds, the reference of the old object (if any) will be
* consumed.
* 845g/865g are special in that they are only limited by
* the width of their cursors, the height is arbitrary up to
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
if (IS_845G(dev) || IS_I865G(dev)) {
if ((width & 63) != 0)
return false;
 
if (width > (IS_845G(dev) ? 64 : 512))
return false;
 
if (height > 1023)
return false;
} else {
switch (width | height) {
case 256:
case 128:
if (IS_GEN2(dev))
return false;
case 64:
break;
default:
return false;
}
}
 
return true;
}
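 
/*
 * In practice the check above allows: a fixed 64-pixel-wide cursor on 845G,
 * widths that are multiples of 64 up to 512 on 865G (height up to 1023 on
 * both), and square 64x64 cursors everywhere else, with 128x128 and 256x256
 * additionally allowed on gen3+.
 */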
 
static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
8218,36 → 8399,15
if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
 
/* Check for which cursor types we support */
if (!((width == 64 && height == 64) ||
(width == 128 && height == 128 && !IS_GEN2(dev)) ||
(width == 256 && height == 256 && !IS_GEN2(dev)))) {
DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
 
if (obj->base.size < width * height * 4) {
DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical) {
unsigned alignment;
 
if (obj->tiling_mode) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
 
/*
* Global gtt pte registers are special registers which actually
* forward writes to a chunk of system memory. Which means that
8285,17 → 8445,14
intel_runtime_pm_put(dev_priv);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
// ret = i915_gem_object_attach_phys(obj, align);
// if (ret) {
// DRM_DEBUG_KMS("failed to attach phys object\n");
// goto fail_locked;
// }
// addr = obj->phys_handle->busaddr;
ret = 1;//i915_gem_object_attach_phys(obj, align);
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_handle->busaddr;
}
 
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
 
finish:
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
8317,6 → 8474,8
if (old_width != width)
intel_update_watermarks(crtc);
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
 
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
}
 
return 0;
8324,8 → 8483,6
i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
 
8360,7 → 8517,7
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
drm_gem_object_unreference(&obj->base);
return ERR_PTR(-ENOMEM);
}
 
8370,7 → 8527,7
 
return &intel_fb->base;
err:
drm_gem_object_unreference_unlocked(&obj->base);
drm_gem_object_unreference(&obj->base);
kfree(intel_fb);
 
return ERR_PTR(ret);
8503,6 → 8660,9
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail_unlock;
 
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
8540,6 → 8700,9
ret = drm_modeset_lock(&crtc->mutex, ctx);
if (ret)
goto fail_unlock;
ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
if (ret)
goto fail_unlock;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
 
8822,35 → 8985,6
return mode;
}
 
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_reg = DPLL(pipe);
int dpll;
 
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
return;
 
dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
assert_panel_unlocked(dev_priv, pipe);
 
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
 
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
}
 
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
8926,107 → 9060,16
intel_runtime_pm_put(dev_priv);
}
 
 
/**
* intel_mark_fb_busy - mark given planes as busy
* @dev: DRM device
* @frontbuffer_bits: bits for the affected planes
* @ring: optional ring for asynchronous commands
*
* This function gets called every time the screen contents change. It can be
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
*/
static void intel_mark_fb_busy(struct drm_device *dev,
unsigned frontbuffer_bits,
struct intel_engine_cs *ring)
{
enum pipe pipe;
 
if (!i915.powersave)
return;
 
for_each_pipe(pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
 
intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (!obj->frontbuffer_bits)
return;
 
if (ring) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
dev_priv->fb_tracking.flip_bits
&= ~obj->frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
}
 
/**
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
* delayed if they're blocked by some oustanding asynchronous rendering.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Delay flushing when rings are still busy.*/
mutex_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
intel_edp_psr_flush(dev, frontbuffer_bits);
}
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
unsigned long flags;
 
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock_irq(&dev->event_lock);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
spin_unlock_irq(&dev->event_lock);
 
if (work) {
cancel_work_sync(&work->work);
9054,6 → 9097,8
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
 
9063,7 → 9108,6
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
9072,6 → 9116,10
if (intel_crtc == NULL)
return;
 
/*
* This is called both by irq handlers and the reset code (to complete
* lost pageflips) so needs the full irqsave spinlocks.
*/
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
 
9083,23 → 9131,9
return;
}
 
/* and that the unpin work is consistent wrt ->pending. */
smp_rmb();
page_flip_completed(intel_crtc);
 
intel_crtc->unpin_work = NULL;
 
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
drm_crtc_vblank_put(crtc);
 
spin_unlock_irqrestore(&dev->event_lock, flags);
 
wake_up_all(&dev_priv->pending_flip_queue);
 
queue_work(dev_priv->wq, &work->work);
 
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
 
void intel_finish_page_flip(struct drm_device *dev, int pipe)
9129,6 → 9163,10
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return true;
 
/*
* The relevant registers don't exist on pre-ctg.
* As the flip done interrupt doesn't trigger for mmio
9167,7 → 9205,12
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
 
/* NB: An MMIO update of the plane base pointer will also
 
/*
* This is called both by irq handlers and the reset code (to complete
* lost pageflips) so needs the full irqsave spinlocks.
*
* NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
9446,7 → 9489,6
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
 
/*
9470,6 → 9512,9
fb->pitches[0] != crtc->primary->fb->pitches[0]))
return -EINVAL;
 
if (i915_terminally_wedged(&dev_priv->gpu_error))
goto out_hang;
 
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
9484,17 → 9529,25
goto free_work;
 
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock_irq(&dev->event_lock);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Before declaring the flip queue wedged, check if
* the hardware completed the operation behind our backs.
*/
if (__intel_pageflip_stall_check(dev, crtc)) {
DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
page_flip_completed(intel_crtc);
} else {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irq(&dev->event_lock);
 
drm_crtc_vblank_put(crtc);
kfree(work);
drm_crtc_vblank_put(crtc);
 
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
}
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
spin_unlock_irq(&dev->event_lock);
 
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
9511,8 → 9564,6
 
work->pending_flip_obj = obj;
 
work->enable_stall_check = true;
 
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
9534,7 → 9585,7
ring = &dev_priv->ring[RCS];
}
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
if (ret)
goto cleanup_pending;
 
9541,15 → 9592,27
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
 
if (use_mmio_flip(ring, obj))
if (use_mmio_flip(ring, obj)) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
else
if (ret)
goto cleanup_unpin;
 
work->flip_queued_seqno = obj->last_write_seqno;
work->flip_queued_ring = obj->ring;
} else {
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
page_flip_flags);
if (ret)
goto cleanup_unpin;
 
work->flip_queued_seqno = intel_ring_get_seqno(ring);
work->flip_queued_ring = ring;
}
 
work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
work->enable_stall_check = true;
 
i915_gem_track_fb(work->old_fb_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
 
9571,9 → 9634,9
mutex_unlock(&dev->struct_mutex);
 
cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
spin_lock_irq(&dev->event_lock);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
spin_unlock_irq(&dev->event_lock);
 
drm_crtc_vblank_put(crtc);
free_work:
9581,11 → 9644,14
 
if (ret == -EIO) {
out_hang:
intel_crtc_wait_for_pending_flips(crtc);
// intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
spin_unlock_irq(&dev->event_lock);
}
}
return ret;
}
#endif
9613,8 → 9679,7
to_intel_encoder(connector->base.encoder);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
9645,8 → 9710,7
connector->base.encoder = &connector->new_encoder->base;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
encoder->base.crtc = &encoder->new_crtc->base;
}
 
9773,6 → 9837,19
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
 
DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
pipe_config->has_dp_encoder,
pipe_config->dp_m2_n2.gmch_m,
pipe_config->dp_m2_n2.gmch_n,
pipe_config->dp_m2_n2.link_m,
pipe_config->dp_m2_n2.link_n,
pipe_config->dp_m2_n2.tu);
 
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio,
pipe_config->has_infoframe);
 
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
9807,8 → 9884,7
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
 
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, source_encoder) {
if (source_encoder->new_crtc != crtc)
continue;
 
9824,8 → 9900,7
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder,
&dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
 
9836,6 → 9911,48
return true;
}
 
static bool check_digital_port_conflicts(struct drm_device *dev)
{
struct intel_connector *connector;
unsigned int used_ports = 0;
 
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
list_for_each_entry(connector,
&dev->mode_config.connector_list, base.head) {
struct intel_encoder *encoder = connector->new_encoder;
 
if (!encoder)
continue;
 
WARN_ON(!encoder->new_crtc);
 
switch (encoder->type) {
unsigned int port_mask;
case INTEL_OUTPUT_UNKNOWN:
if (WARN_ON(!HAS_DDI(dev)))
break;
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
 
/* the same port mustn't appear more than once */
if (used_ports & port_mask)
return false;
 
used_ports |= port_mask;
default:
break;
}
}
 
return true;
}
 
static struct intel_crtc_config *
intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
9852,6 → 9969,11
return ERR_PTR(-EINVAL);
}
 
if (!check_digital_port_conflicts(dev)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
return ERR_PTR(-EINVAL);
}
 
pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
if (!pipe_config)
return ERR_PTR(-ENOMEM);
9909,8 → 10031,7
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
 
if (&encoder->new_crtc->base != crtc)
continue;
9988,8 → 10109,7
1 << connector->new_encoder->new_crtc->pipe;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc == &encoder->new_crtc->base)
continue;
 
10059,12 → 10179,14
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
 
list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
base.head) {
intel_shared_dpll_commit(dev_priv);
 
for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
 
10153,6 → 10275,22
return false; \
}
 
/* This is required for BDW+ where there is only one set of registers for
* switching between high and low RR.
* This macro can be used whenever a comparison has to be made between one
* hw state and multiple sw state variables.
*/
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
if ((current_config->name != pipe_config->name) && \
(current_config->alt_name != pipe_config->name)) { \
DRM_ERROR("mismatch in " #name " " \
"(expected %i or %i, found %i)\n", \
current_config->name, \
current_config->alt_name, \
pipe_config->name); \
return false; \
}
 
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
10185,6 → 10323,8
PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
PIPE_CONF_CHECK_I(has_dp_encoder);
 
if (INTEL_INFO(dev)->gen < 8) {
PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
PIPE_CONF_CHECK_I(dp_m_n.link_m);
10191,6 → 10331,21
PIPE_CONF_CHECK_I(dp_m_n.link_n);
PIPE_CONF_CHECK_I(dp_m_n.tu);
 
if (current_config->has_drrs) {
PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
PIPE_CONF_CHECK_I(dp_m2_n2.tu);
}
} else {
PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
}
 
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
10210,6 → 10365,7
if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
IS_VALLEYVIEW(dev))
PIPE_CONF_CHECK_I(limited_color_range);
PIPE_CONF_CHECK_I(has_infoframe);
 
PIPE_CONF_CHECK_I(has_audio);
 
10266,6 → 10422,9
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
 
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
10275,6 → 10434,7
 
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
10282,6 → 10442,56
return true;
}
 
static void check_wm_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_ddb_allocation hw_ddb, *sw_ddb;
struct intel_crtc *intel_crtc;
int plane;
 
if (INTEL_INFO(dev)->gen < 9)
return;
 
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
for_each_intel_crtc(dev, intel_crtc) {
struct skl_ddb_entry *hw_entry, *sw_entry;
const enum pipe pipe = intel_crtc->pipe;
 
if (!intel_crtc->active)
continue;
 
/* planes */
for_each_plane(pipe, plane) {
hw_entry = &hw_ddb.plane[pipe][plane];
sw_entry = &sw_ddb->plane[pipe][plane];
 
if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
 
DRM_ERROR("mismatch in DDB state pipe %c plane %d "
"(expected (%u,%u), found (%u,%u))\n",
pipe_name(pipe), plane + 1,
sw_entry->start, sw_entry->end,
hw_entry->start, hw_entry->end);
}
 
/* cursor */
hw_entry = &hw_ddb.cursor[pipe];
sw_entry = &sw_ddb->cursor[pipe];
 
if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
 
DRM_ERROR("mismatch in DDB state pipe %c cursor "
"(expected (%u,%u), found (%u,%u))\n",
pipe_name(pipe),
sw_entry->start, sw_entry->end,
hw_entry->start, hw_entry->end);
}
}
 
static void
check_connector_state(struct drm_device *dev)
{
10304,8 → 10514,7
struct intel_encoder *encoder;
struct intel_connector *connector;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
bool enabled = false;
bool active = false;
enum pipe pipe, tracked_pipe;
10384,8 → 10593,7
WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc != &crtc->base)
continue;
enabled = true;
10403,12 → 10611,12
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
 
/* hw state is inconsistent with the pipe A quirk */
if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
/* hw state is inconsistent with the pipe quirk */
if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
active = crtc->active;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
10450,9 → 10658,9
 
active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
 
WARN(pll->active > pll->refcount,
WARN(pll->active > hweight32(pll->config.crtc_mask),
"more active pll users than references: %i vs %i\n",
pll->active, pll->refcount);
pll->active, hweight32(pll->config.crtc_mask));
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
WARN(pll->on && !pll->active,
10470,11 → 10678,11
WARN(pll->active != active_crtcs,
"pll active crtcs mismatch (expected %i, found %i)\n",
pll->active, active_crtcs);
WARN(pll->refcount != enabled_crtcs,
WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
"pll enabled crtcs mismatch (expected %i, found %i)\n",
pll->refcount, enabled_crtcs);
hweight32(pll->config.crtc_mask), enabled_crtcs);
 
WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
}
10483,6 → 10691,7
void
intel_modeset_check_state(struct drm_device *dev)
{
check_wm_state(dev);
check_connector_state(dev);
check_encoder_state(dev);
check_crtc_state(dev);
10533,22 → 10742,57
 
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev) &&
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
}
 
static struct intel_crtc_config *
intel_modeset_compute_config(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_framebuffer *fb,
unsigned *modeset_pipes,
unsigned *prepare_pipes,
unsigned *disable_pipes)
{
struct intel_crtc_config *pipe_config = NULL;
 
intel_modeset_affected_pipes(crtc, modeset_pipes,
prepare_pipes, disable_pipes);
 
if ((*modeset_pipes) == 0)
goto out;
 
/*
* Note this needs changes when we start tracking multiple modes
* and crtcs. At that point we'll need to compute the whole config
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
if (IS_ERR(pipe_config)) {
goto out;
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
 
out:
return pipe_config;
}
 
static int __intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
int x, int y, struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *saved_mode;
struct intel_crtc_config *pipe_config = NULL;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
int ret = 0;
 
saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
10555,28 → 10799,10
if (!saved_mode)
return -ENOMEM;
 
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
 
*saved_mode = crtc->mode;
 
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
* Hence simply check whether any bit is set in modeset_pipes in all the
* pieces of code that are not yet converted to deal with multiple crtcs
* changing their mode at the same time. */
if (modeset_pipes) {
pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
pipe_config = NULL;
 
goto out;
}
intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
"[modeset]");
if (modeset_pipes)
to_intel_crtc(crtc)->new_config = pipe_config;
}
 
/*
* See if the config requires any additional preparation, e.g.
10592,6 → 10818,22
prepare_pipes &= ~disable_pipes;
}
 
if (dev_priv->display.crtc_compute_clock) {
unsigned clear_pipes = modeset_pipes | disable_pipes;
 
ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
if (ret)
goto done;
 
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
ret = dev_priv->display.crtc_compute_clock(intel_crtc);
if (ret) {
intel_shared_dpll_abort_config(dev_priv);
goto done;
}
}
}
 
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
10602,6 → 10844,10
 
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
*
* Note we'll need to fix this up when we start tracking multiple
* pipes; here we assume a single modeset_pipe and only track the
* single crtc and mode.
*/
if (modeset_pipes) {
crtc->mode = *mode;
10623,8 → 10869,7
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
 
if (dev_priv->display.modeset_global_resources)
dev_priv->display.modeset_global_resources(dev);
modeset_update_crtc_power_domains(dev);
 
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
10635,9 → 10880,7
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
obj,
NULL);
ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
10652,11 → 10895,6
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
 
ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
x, y, fb);
if (ret)
goto done;
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10671,19 → 10909,23
if (ret && crtc->enabled)
crtc->mode = *saved_mode;
 
out:
kfree(pipe_config);
kfree(saved_mode);
return ret;
}
 
static int intel_set_mode(struct drm_crtc *crtc,
static int intel_set_mode_pipes(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
int x, int y, struct drm_framebuffer *fb,
struct intel_crtc_config *pipe_config,
unsigned modeset_pipes,
unsigned prepare_pipes,
unsigned disable_pipes)
{
int ret;
 
ret = __intel_set_mode(crtc, mode, x, y, fb);
ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
prepare_pipes, disable_pipes);
 
if (ret == 0)
intel_modeset_check_state(crtc->dev);
10691,6 → 10933,26
return ret;
}
 
static int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
struct intel_crtc_config *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
 
pipe_config = intel_modeset_compute_config(crtc, mode, fb,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
 
if (IS_ERR(pipe_config))
return PTR_ERR(pipe_config);
 
return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
modeset_pipes, prepare_pipes,
disable_pipes);
}
 
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
10776,7 → 11038,7
}
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
}
10935,8 → 11197,7
}
 
/* Check for any encoders that needs to be disabled. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
10969,9 → 11230,7
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
 
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc) {
crtc->new_enabled = true;
break;
11008,7 → 11267,7
connector->new_encoder = NULL;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc)
encoder->new_crtc = NULL;
}
11022,6 → 11281,8
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
struct intel_crtc_config *pipe_config;
unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret;
 
BUG_ON(!set);
11067,13 → 11328,42
if (ret)
goto fail;
 
pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
set->fb,
&modeset_pipes,
&prepare_pipes,
&disable_pipes);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto fail;
} else if (pipe_config) {
if (pipe_config->has_audio !=
to_intel_crtc(set->crtc)->config.has_audio)
config->mode_changed = true;
 
/*
* Note we have an issue here with infoframes: current code
* only updates them on the full mode set path per hw
* requirements. So here we should be checking for any
* required changes and forcing a mode set.
*/
}
 
/* set_mode will free it in the mode_changed case */
if (!config->mode_changed)
kfree(pipe_config);
 
intel_update_pipe_size(to_intel_crtc(set->crtc));
 
if (config->mode_changed) {
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
ret = intel_set_mode_pipes(set->crtc, set->mode,
set->x, set->y, set->fb, pipe_config,
modeset_pipes, prepare_pipes,
disable_pipes);
} else if (config->fb_changed) {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
 
// intel_crtc_wait_for_pending_flips(set->crtc);
 
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
11084,8 → 11374,7
*/
if (!intel_crtc->primary_enabled && ret == 0) {
WARN_ON(!intel_crtc->active);
intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
intel_crtc->pipe);
intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
}
 
/*
11140,7 → 11429,7
{
uint32_t val;
 
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
 
val = I915_READ(PCH_DPLL(pll->id));
11154,8 → 11443,8
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
 
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11164,7 → 11453,7
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
 
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
11175,7 → 11464,7
*
* So write it again.
*/
I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
11238,8 → 11527,6
intel_primary_plane_disable(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
 
if (!plane->fb)
11261,8 → 11548,9
if (!intel_crtc->primary_enabled)
goto disable_unpin;
 
intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
intel_plane->pipe);
// intel_crtc_wait_for_pending_flips(plane->crtc);
intel_disable_primary_hw_plane(plane, plane->crtc);
 
disable_unpin:
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
11275,123 → 11563,191
}
 
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
 
return drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true, &state->visible);
}
 
static int
intel_prepare_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_rect src = {
/* 16.16 fixed point */
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
const struct drm_rect clip = {
/* integer pixels */
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true, &visible);
 
if (ret)
return ret;
 
/*
* If the CRTC isn't enabled, we're just pinning the framebuffer,
* updating the fb pointer, and returning without touching the
* hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
* turn on the display with all planes setup as desired.
*/
if (!crtc->enabled) {
if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_DEBUG_KMS("pin & fence failed\n");
return ret;
}
}
 
return 0;
}
 
static void
intel_commit_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *old_fb = plane->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_rect *src = &state->src;
 
crtc->primary->fb = fb;
crtc->x = src->x1 >> 16;
crtc->y = src->y1 >> 16;
 
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
intel_plane->src_x = state->orig_src.x1;
intel_plane->src_y = state->orig_src.y1;
intel_plane->src_w = drm_rect_width(&state->orig_src);
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
 
if (intel_crtc->active) {
/*
* If we already called setplane while the crtc was disabled,
* we may have an fb pinned; unpin it.
* FBC does not work on some platforms for rotated
* planes, so disable it when rotation is not 0 and
* update it when rotation is set back to 0.
*
* FIXME: This is redundant with the fbc update done in
* the primary plane enable function except that that
* one is done too late. We eventually need to unify
* this.
*/
if (plane->fb)
intel_unpin_fb_obj(old_obj);
if (intel_crtc->primary_enabled &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.plane == intel_crtc->plane &&
intel_plane->rotation != BIT(DRM_ROTATE_0)) {
intel_disable_fbc(dev);
}
 
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
if (state->visible) {
bool was_enabled = intel_crtc->primary_enabled;
 
/* Pin and return without programming hardware */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
mutex_unlock(&dev->struct_mutex);
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
 
return ret;
}
intel_crtc->primary_enabled = true;
 
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
 
/*
* If clipping results in a non-visible primary plane, we'll disable
* the primary plane. Note that this is a bit different than what
* happens if userspace explicitly disables the plane by passing fb=0
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev) && !was_enabled)
intel_wait_for_vblank(dev, intel_crtc->pipe);
} else {
/*
* If clipping results in a non-visible primary plane,
* we'll disable the primary plane. Note that this is
* a bit different than what happens if userspace
* explicitly disables the plane by passing fb=0
* because plane->fb still gets set and pinned.
*/
if (!visible) {
intel_disable_primary_hw_plane(plane, crtc);
}
 
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
/*
* Try to pin the new fb first so that we can bail out if we
* fail.
*/
if (plane->fb != fb) {
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
if (old_fb && old_fb != fb) {
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
 
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
static int
intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_plane_state state;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
 
if (intel_crtc->primary_enabled)
intel_disable_primary_hw_plane(dev_priv,
intel_plane->plane,
intel_plane->pipe);
state.crtc = crtc;
state.fb = fb;
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
 
if (plane->fb != fb)
if (plane->fb)
intel_unpin_fb_obj(old_obj);
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
 
mutex_unlock(&dev->struct_mutex);
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 
return 0;
}
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
ret = intel_check_primary_plane(plane, &state);
if (ret)
return ret;
 
if (!intel_crtc->primary_enabled)
intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
intel_crtc->pipe);
ret = intel_prepare_primary_plane(plane, &state);
if (ret)
return ret;
 
intel_commit_primary_plane(plane, &state);
 
return 0;
}
 
11407,6 → 11763,7
.update_plane = intel_primary_plane_setplane,
.disable_plane = intel_primary_plane_disable,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property
};
 
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
11424,6 → 11781,7
primary->max_downscale = 1;
primary->pipe = pipe;
primary->plane = pipe;
primary->rotation = BIT(DRM_ROTATE_0);
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
 
11439,6 → 11797,19
&intel_primary_plane_funcs,
intel_primary_formats, num_formats,
DRM_PLANE_TYPE_PRIMARY);
 
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180));
if (dev->mode_config.rotation_property)
drm_object_attach_property(&primary->base.base,
dev->mode_config.rotation_property,
primary->rotation);
}
 
return &primary->base;
}
 
11454,58 → 11825,144
}
 
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_rect dest = {
/* integer pixels */
.x1 = crtc_x,
.y1 = crtc_y,
.x2 = crtc_x + crtc_w,
.y2 = crtc_y + crtc_h,
};
struct drm_rect src = {
/* 16.16 fixed point */
.x1 = src_x,
.y1 = src_y,
.x2 = src_x + src_w,
.y2 = src_y + src_h,
};
const struct drm_rect clip = {
/* integer pixels */
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = state->fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_w, crtc_h;
unsigned stride;
int ret;
 
ret = drm_plane_helper_check_update(plane, crtc, fb,
&src, &dest, &clip,
src, dest, clip,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true, &visible);
true, true, &state->visible);
if (ret)
return ret;
 
crtc->cursor_x = crtc_x;
crtc->cursor_y = crtc_y;
 
/* if we want to turn off the cursor ignore width and height */
if (!obj)
return 0;
 
/* Check for which cursor types we support */
crtc_w = drm_rect_width(&state->orig_dst);
crtc_h = drm_rect_height(&state->orig_dst);
if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
 
stride = roundup_pow_of_two(crtc_w) * 4;
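/*
 * Cursor images are 32bpp, so e.g. a 64x64 cursor needs a
 * roundup_pow_of_two(64) * 4 = 256 byte stride and therefore at least
 * 256 * 64 = 16 KiB of backing storage for the size check below.
 */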
if (obj->base.size < stride * crtc_h) {
DRM_DEBUG_KMS("buffer is too small\n");
return -ENOMEM;
}
 
if (fb == crtc->cursor->fb)
return 0;
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
ret = -EINVAL;
}
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
static int
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
int crtc_w, crtc_h;
 
crtc->cursor_x = state->orig_dst.x1;
crtc->cursor_y = state->orig_dst.y1;
 
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
intel_plane->src_x = state->orig_src.x1;
intel_plane->src_y = state->orig_src.y1;
intel_plane->src_w = drm_rect_width(&state->orig_src);
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
 
if (fb != crtc->cursor->fb) {
crtc_w = drm_rect_width(&state->orig_dst);
crtc_h = drm_rect_height(&state->orig_dst);
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
intel_crtc_update_cursor(crtc, visible);
intel_crtc_update_cursor(crtc, state->visible);
 
 
return 0;
}
}
 
static int
intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane_state state;
int ret;
 
state.crtc = crtc;
state.fb = fb;
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
 
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
 
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
 
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_check_cursor_plane(plane, &state);
if (ret)
return ret;
 
return intel_commit_cursor_plane(plane, &state);
}
 
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
.update_plane = intel_cursor_plane_update,
.disable_plane = intel_cursor_plane_disable,
.destroy = intel_plane_destroy,
.set_property = intel_plane_set_property,
};
 
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
11521,6 → 11978,7
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
cursor->rotation = BIT(DRM_ROTATE_0);
 
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_cursor_plane_funcs,
11527,6 → 11985,19
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR);
 
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180));
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
cursor->rotation);
}
 
return &cursor->base;
}
 
11575,9 → 12046,8
 
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
intel_crtc->cursor_size = ~0;
 
init_waitqueue_head(&intel_crtc->vbl_wait);
 
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
11603,7 → 12073,7
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!encoder)
if (!encoder || WARN_ON(!encoder->crtc))
return INVALID_PIPE;
 
return to_intel_crtc(encoder->crtc)->pipe;
11639,8 → 12109,7
int index_mask = 0;
int entry = 0;
 
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
 
11692,9 → 12161,12
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_ULT(dev))
if (INTEL_INFO(dev)->gen >= 9)
return false;
 
if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
return false;
 
if (IS_CHERRYVIEW(dev))
return false;
 
11762,28 → 12234,37
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
* (no way to plug in a DP->HDMI dongle) the DDC pins for
* eDP ports may have been muxed to an alternate function.
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
 
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
}
 
if (IS_CHERRYVIEW(dev)) {
if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) {
if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
PORT_D);
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
}
}
 
intel_dsi_init(dev);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
11825,9 → 12306,9
intel_dvo_init(dev);
 
 
intel_edp_psr_init(dev);
intel_psr_init(dev);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
11993,16 → 12474,22
if (HAS_DDI(dev)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_compute_clock =
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
if (INTEL_INFO(dev)->gen >= 9)
dev_priv->display.update_primary_plane =
skylake_update_primary_plane;
else
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_plane_config = ironlake_get_plane_config;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_compute_clock =
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
12011,7 → 12498,7
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
12020,7 → 12507,7
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_plane_config = i9xx_get_plane_config;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
12057,33 → 12544,20
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
 
if (HAS_PCH_SPLIT(dev)) {
if (IS_GEN5(dev)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
snb_modeset_global_resources;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
dev_priv->display.modeset_global_resources =
haswell_modeset_global_resources;
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
dev_priv->display.write_eld = ironlake_write_eld;
}
 
/* Default just returns -ENODEV to indicate unsupported */
12093,6 → 12567,8
 
 
intel_panel_init_backlight_funcs(dev);
 
mutex_init(&dev_priv->pps_mutex);
}
 
/*
12108,6 → 12584,14
DRM_INFO("applying pipe a force quirk\n");
}
 
static void quirk_pipeb_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->quirks |= QUIRK_PIPEB_FORCE;
DRM_INFO("applying pipe b force quirk\n");
}
 
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
12212,6 → 12696,9
/* Acer C720 Chromebook (Core i3 4005U) */
{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
 
/* Apple Macbook 2,1 (Core 2 T7400) */
{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
/* Toshiba CB35 Chromebook (Celeron 2955U) */
{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
12254,7 → 12741,11
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
 
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
/*
* Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
* from S3 without preserving (some of?) the other bits.
*/
I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
 
12267,16 → 12758,9
 
intel_init_clock_gating(dev);
 
intel_reset_dpio(dev);
 
intel_enable_gt_powersave(dev);
}
 
void intel_modeset_suspend_hw(struct drm_device *dev)
{
intel_suspend_hw(dev);
}
 
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
12328,7 → 12812,7
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
intel_crtc_init(dev, pipe);
for_each_sprite(pipe, sprite) {
ret = intel_plane_init(dev, pipe, sprite);
12339,10 → 12823,11
}
 
intel_init_dpio(dev);
intel_reset_dpio(dev);
 
intel_shared_dpll_init(dev);
 
/* save the BIOS value before clobbering it */
dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
12434,9 → 12919,10
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* restore vblank interrupts to correct state */
if (crtc->active)
if (crtc->active) {
update_scanline_offset(crtc);
drm_vblank_on(dev, crtc->pipe);
else
} else
drm_vblank_off(dev, crtc->pipe);
 
/* We need to sanitize the plane -> pipe mapping first because this will
12519,7 → 13005,7
}
}
 
if (crtc->active || IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen < 5) {
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
12535,8 → 13021,6
*/
crtc->cpu_fifo_underrun_disabled = true;
crtc->pch_fifo_underrun_disabled = true;
 
update_scanline_offset(crtc);
}
}
 
12609,7 → 13093,7
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA))
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
 
i915_redisable_vga_power_on(dev);
12653,23 → 13137,25
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
 
pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
pll->on = pll->get_hw_state(dev_priv, pll,
&pll->config.hw_state);
pll->active = 0;
pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
pll->active++;
pll->config.crtc_mask |= 1 << crtc->pipe;
}
pll->refcount = pll->active;
}
 
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
pll->name, pll->config.crtc_mask, pll->on);
 
if (pll->refcount)
if (pll->config.crtc_mask)
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
pipe = 0;
 
if (encoder->get_hw_state(encoder, &pipe)) {
12733,12 → 13219,11
}
 
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
12756,7 → 13241,9
pll->on = false;
}
 
if (HAS_PCH_SPLIT(dev))
if (IS_GEN9(dev))
skl_wm_get_hw_state(dev);
else if (HAS_PCH_SPLIT(dev))
ilk_wm_get_hw_state(dev);
 
if (force_restore) {
12766,11 → 13253,11
* We need to use raw interfaces for restoring state to avoid
* checking (bogus) intermediate states.
*/
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
struct drm_crtc *crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
 
__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
crtc->primary->fb);
}
} else {
12782,6 → 13269,7
 
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *c;
struct drm_i915_gem_object *obj;
 
12789,6 → 13277,16
intel_init_gt_powersave(dev);
mutex_unlock(&dev->struct_mutex);
 
/*
* There may be no VBT; and if the BIOS enabled SSC we can
* just keep using it to avoid unnecessary flicker. Whereas if the
* BIOS isn't using it, don't assume it will work even if the VBT
* indicates as much.
*/
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE);
 
intel_modeset_init_hw(dev);
 
// intel_setup_overlay(dev);
12804,7 → 13302,9
if (obj == NULL)
continue;
 
if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
if (intel_pin_and_fence_fb_obj(c->primary,
c->primary->fb,
NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
12828,14 → 13328,16
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
 
intel_disable_gt_powersave(dev);
 
intel_backlight_unregister(dev);
 
/*
* Interrupts and polling as the first thing to avoid creating havoc.
* Too much stuff here (turning of rps, connectors, ...) would
* Too much stuff here (turning off connectors, ...) would
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
intel_hpd_cancel_work(dev_priv);
dev_priv->pm._irqs_disabled = true;
intel_irq_uninstall(dev_priv);
 
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
12849,8 → 13351,6
 
intel_disable_fbc(dev);
 
intel_disable_gt_powersave(dev);
 
ironlake_teardown_rc6(dev);
 
mutex_unlock(&dev->struct_mutex);
12991,9 → 13491,9
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
for_each_pipe(i) {
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
intel_display_power_enabled_unlocked(dev_priv,
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(i));
if (!error->pipe[i].power_domain_on)
continue;
13029,7 → 13529,7
enum transcoder cpu_transcoder = transcoders[i];
 
error->transcoder[i].power_domain_on =
intel_display_power_enabled_unlocked(dev_priv,
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (!error->transcoder[i].power_domain_on)
continue;
13055,6 → 13555,7
struct drm_device *dev,
struct intel_display_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
if (!error)
13064,7 → 13565,7
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
for_each_pipe(dev_priv, i) {
err_printf(m, "Pipe [%d]:\n", i);
err_printf(m, " Power: %s\n",
error->pipe[i].power_domain_on ? "on" : "off");
13106,3 → 13607,24
}
}
#endif
 
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
struct intel_crtc *crtc;
 
for_each_intel_crtc(dev, crtc) {
struct intel_unpin_work *work;
 
spin_lock_irq(&dev->event_lock);
 
work = crtc->unpin_work;
 
if (work && work->event &&
work->event->base.file_priv == file) {
kfree(work->event);
work->event = NULL;
}
 
spin_unlock_irq(&dev->event_lock);
}
}
/drivers/video/drm/i915/intel_dp.c
109,8 → 109,11
}
 
static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
 
int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
222,8 → 225,7
return MODE_OK;
}
 
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
int i;
uint32_t v = 0;
235,8 → 237,7
return v;
}
 
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
281,41 → 282,277
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);
struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out);
struct intel_dp *intel_dp);
 
static void pps_lock(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
 
/*
* See vlv_power_sequencer_reset() why we need
* a power domain reference here.
*/
power_domain = intel_display_port_power_domain(encoder);
intel_display_power_get(dev_priv, power_domain);
 
mutex_lock(&dev_priv->pps_mutex);
}
 
static void pps_unlock(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
 
mutex_unlock(&dev_priv->pps_mutex);
 
power_domain = intel_display_port_power_domain(encoder);
intel_display_power_put(dev_priv, power_domain);
}
 
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
bool pll_enabled;
uint32_t DP;
 
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power seqeuncer kick due to port %c being active\n",
pipe_name(pipe), port_name(intel_dig_port->port)))
return;
 
DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
pipe_name(pipe), port_name(intel_dig_port->port));
 
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
 
if (IS_CHERRYVIEW(dev))
DP |= DP_PIPE_SELECT_CHV(pipe);
else if (pipe == PIPE_B)
DP |= DP_PIPEB_SELECT;
 
pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
 
/*
* The DPLL for the pipe must be enabled for this to work.
* So enable it temporarily if it's not already enabled.
*/
if (!pll_enabled)
vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll);
 
/*
* Similar magic as in intel_dp_enable_port().
* We _must_ do this port enable + disable trick
* to make this power sequencer lock onto the port.
* Otherwise even the VDD force bit won't work.
*/
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
 
I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
 
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
 
if (!pll_enabled)
vlv_force_pll_off(dev, pipe);
}
 
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
enum pipe pipe;
 
/* modeset should have pipe */
if (crtc)
return to_intel_crtc(crtc)->pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
 
/* init time, try to find a pipe with this port selected */
/* We should never land here with regular DP ports */
WARN_ON(!is_edp(intel_dp));
 
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
 
/*
* We don't have a power sequencer currently.
* Pick one that's not used by other ports.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
struct intel_dp *tmp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
 
tmp = enc_to_intel_dp(&encoder->base);
 
if (tmp->pps_pipe != INVALID_PIPE)
pipes &= ~(1 << tmp->pps_pipe);
}
 
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
if (WARN_ON(pipes == 0))
pipe = PIPE_A;
else
pipe = ffs(pipes) - 1;
 
vlv_steal_power_sequencer(dev, pipe);
intel_dp->pps_pipe = pipe;
 
DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe),
port_name(intel_dig_port->port));
 
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
 
/*
* Even vdd force doesn't work until we've made
* the power sequencer lock in on the port.
*/
vlv_power_sequencer_kick(intel_dp);
 
return intel_dp->pps_pipe;
}
 
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
enum pipe pipe);
 
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}
 
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
 
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
return true;
}
 
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
enum port port,
vlv_pipe_check pipe_check)
{
enum pipe pipe;
 
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
 
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
 
if (!pipe_check(dev_priv, pipe))
continue;
 
return pipe;
if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
return pipe;
}
 
/* shrug */
return PIPE_A;
return INVALID_PIPE;
}
 
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
vlv_pipe_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps_pipe == INVALID_PIPE)
intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
vlv_pipe_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps_pipe == INVALID_PIPE)
intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
vlv_pipe_any);
 
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
port_name(port));
return;
}
 
DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
port_name(port), pipe_name(intel_dp->pps_pipe));
 
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
 
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct intel_encoder *encoder;
 
if (WARN_ON(!IS_VALLEYVIEW(dev)))
return;
 
/*
* We can't grab pps_mutex here due to deadlock with power_domain
* mutex when power_domain functions are called while holding pps_mutex.
* That also means that in order to use pps_pipe the code needs to
* hold both a power domain reference and pps_mutex, and the power domain
* reference get/put must be done while _not_ holding pps_mutex.
* pps_{lock,unlock}() do these steps in the correct order, so one
* should use them always.
*/
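/*
 * A minimal sketch of the resulting call pattern (for any struct intel_dp
 * pointer, using the wrappers defined earlier in this file):
 *
 *	pps_lock(intel_dp);
 *	... read/write the PP_CONTROL / PP_STATUS registers ...
 *	pps_unlock(intel_dp);
 */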
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
struct intel_dp *intel_dp;
 
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
 
intel_dp = enc_to_intel_dp(&encoder->base);
intel_dp->pps_pipe = INVALID_PIPE;
}
}
 
static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
336,12 → 573,55
return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
 
#if 0
/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
This function is only applicable when panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
void *unused)
{
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_div;
u32 pp_ctrl_reg, pp_div_reg;
 
if (!is_edp(intel_dp) || code != SYS_RESTART)
return 0;
 
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev)) {
enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
 
pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
pp_div = I915_READ(pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
 
/* 0x1F write to PP_DIV_REG sets max cycle delay */
I915_WRITE(pp_div_reg, pp_div | 0x1F);
I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
msleep(intel_dp->panel_power_cycle_delay);
}
 
pps_unlock(intel_dp);
 
return 0;
}
#endif
 
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
 
349,13 → 629,14
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
 
power_domain = intel_display_port_power_domain(intel_encoder);
return intel_display_power_enabled(dev_priv, power_domain) &&
(I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (IS_VALLEYVIEW(dev) &&
intel_dp->pps_pipe == INVALID_PIPE)
return false;
 
return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
 
static void
456,6 → 737,16
return index ? 0 : 100;
}
 
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
/*
* SKL doesn't need us to program the AUX clock divider (hardware will
* derive the clock from CDCLK automatically). We still implement the
* get_aux_clock_divider vfunc to plug into the existing code.
*/
return index ? 0 : 1;
}
 
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
486,9 → 777,24
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
 
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
bool has_aux_irq,
int send_bytes,
uint32_t unused)
{
return DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_TIME_OUT_1600us |
DP_AUX_CH_CTL_RECEIVE_ERROR |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
const uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
503,8 → 809,16
bool has_aux_irq = HAS_AUX_IRQ(dev);
bool vdd;
 
vdd = _edp_panel_vdd_on(intel_dp);
pps_lock(intel_dp);
 
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
* In such cases we want to leave VDD enabled and it's up to upper layers
* to turn it off. But e.g. for i2c-dev access we need to turn it on/off
* ourselves.
*/
vdd = edp_panel_vdd_on(intel_dp);
 
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
546,7 → 860,8
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
I915_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
intel_dp_pack_aux(send + i,
send_bytes - i));
 
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl, send_ctl);
600,7 → 915,7
recv_bytes = recv_size;
 
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(I915_READ(ch_data + i),
intel_dp_unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
 
ret = recv_bytes;
611,6 → 926,8
if (vdd)
edp_panel_vdd_off(intel_dp, false);
 
pps_unlock(intel_dp);
 
return ret;
}
 
709,7 → 1026,16
BUG();
}
 
if (!HAS_DDI(dev))
/*
* The AUX_CTL register is usually DP_CTL + 0x10.
*
* On Haswell and Broadwell though:
* - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
* - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
*
* Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
*/
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
 
intel_dp->aux.name = name;
716,6 → 1042,8
intel_dp->aux.dev = dev->dev;
intel_dp->aux.transfer = intel_dp_aux_transfer;
 
DRM_DEBUG_KMS("registering %s bus for %s\n", name,
connector->base.kdev->kobj.name);
 
ret = drm_dp_aux_register(&intel_dp->aux);
if (ret < 0) {
734,6 → 1062,33
}
 
static void
skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
{
u32 ctrl1;
 
pipe_config->ddi_pll_sel = SKL_DPLL0;
pipe_config->dpll_hw_state.cfgcr1 = 0;
pipe_config->dpll_hw_state.cfgcr2 = 0;
 
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
switch (link_bw) {
case DP_LINK_BW_1_62:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
SKL_DPLL0);
break;
case DP_LINK_BW_2_7:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
SKL_DPLL0);
break;
case DP_LINK_BW_5_4:
ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
SKL_DPLL0);
break;
}
pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
 
static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
{
switch (link_bw) {
782,20 → 1137,6
}
}
 
static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder transcoder = crtc->config.cpu_transcoder;
 
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}
 
bool
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
821,6 → 1162,7
pipe_config->has_pch_encoder = true;
 
pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio;
 
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
852,25 → 1194,17
bpp = dev_priv->vbt.edp_bpp;
}
 
if (IS_BROADWELL(dev)) {
/* Yes, it's an ugly hack. */
/*
* Use the maximum clock and number of lanes the eDP panel
* advertises being capable of. The panels are generally
* designed to support only a single clock and lane
* configuration, and typically these values correspond to the
* native resolution of the panel.
*/
min_lane_count = max_lane_count;
DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
min_lane_count);
} else if (dev_priv->vbt.edp_lanes) {
min_lane_count = min(dev_priv->vbt.edp_lanes,
max_lane_count);
DRM_DEBUG_KMS("using min %u lanes per VBT\n",
min_lane_count);
min_clock = max_clock;
}
 
if (dev_priv->vbt.edp_rate) {
min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
bws[min_clock]);
}
}
 
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
bpp);
924,6 → 1258,7
 
if (intel_connector->panel.downclock_mode != NULL &&
intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
intel_link_compute_m_n(bpp, lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
930,7 → 1265,9
&pipe_config->dp_m2_n2);
}
 
if (HAS_DDI(dev))
if (IS_SKYLAKE(dev) && is_edp(intel_dp))
skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
else
intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
1003,12 → 1340,8
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
 
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
if (crtc->config.has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(&encoder->base, adjusted_mode);
}
 
/* Split out the IBX/CPU vs CPT settings */
 
1064,6 → 1397,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_stat_reg, pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
1127,6 → 1462,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 control;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
control = I915_READ(_pp_ctrl_reg(intel_dp));
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
1133,7 → 1470,12
return control;
}
 
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp)
/*
* Must be paired with edp_panel_vdd_off().
* Must hold pps_mutex around the whole on/off sequence.
* Can be nested with intel_edp_panel_vdd_{on,off}() calls.
*/
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1144,6 → 1486,8
u32 pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!is_edp(intel_dp))
return false;
 
1155,7 → 1499,8
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("Turning eDP VDD on\n");
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->port));
 
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
1174,7 → 1519,8
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP was not running\n");
DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
port_name(intel_dig_port->port));
msleep(intel_dp->panel_power_up_delay);
}
 
1181,32 → 1527,49
return need_to_disable;
}
 
/*
* Must be paired with intel_edp_panel_vdd_off() or
* intel_edp_panel_off().
* Nested calls to these functions are not allowed since
* we drop the lock. Caller must use some higher level
* locking to prevent nested calls from other threads.
*/
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
if (is_edp(intel_dp)) {
bool vdd = _edp_panel_vdd_on(intel_dp);
bool vdd;
 
WARN(!vdd, "eDP VDD already requested on\n");
if (!is_edp(intel_dp))
return;
 
pps_lock(intel_dp);
vdd = edp_panel_vdd_on(intel_dp);
pps_unlock(intel_dp);
 
WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->port));
}
}
 
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
 
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 
if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) {
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum intel_display_power_domain power_domain;
u32 pp;
u32 pp_stat_reg, pp_ctrl_reg;
 
DRM_DEBUG_KMS("Turning eDP VDD off\n");
lockdep_assert_held(&dev_priv->pps_mutex);
 
WARN_ON(intel_dp->want_panel_vdd);
 
if (!edp_have_panel_vdd(intel_dp))
return;
 
DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
port_name(intel_dig_port->port));
 
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
 
1226,17 → 1589,16
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_put(dev_priv, power_domain);
}
}
 
static void edp_panel_vdd_work(struct work_struct *__work)
{
struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
struct intel_dp, panel_vdd_work);
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
pps_lock(intel_dp);
if (!intel_dp->want_panel_vdd)
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
pps_unlock(intel_dp);
}
 
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1252,12 → 1614,23
schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
 
/*
* Must be paired with edp_panel_vdd_on().
* Must hold pps_mutex around the whole on/off sequence.
* Can be nested with intel_edp_panel_vdd_{on,off}() calls.
*/
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
struct drm_i915_private *dev_priv =
intel_dp_to_dev(intel_dp)->dev_private;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!is_edp(intel_dp))
return;
 
WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
port_name(dp_to_dig_port(intel_dp)->port));
 
intel_dp->want_panel_vdd = false;
 
1267,7 → 1640,7
edp_panel_vdd_schedule_off(intel_dp);
}
 
void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
1274,15 → 1647,18
u32 pp;
u32 pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("Turn eDP power on\n");
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
port_name(dp_to_dig_port(intel_dp)->port));
 
if (edp_have_panel_power(intel_dp)) {
DRM_DEBUG_KMS("eDP power already on\n");
if (WARN(edp_have_panel_power(intel_dp),
"eDP port %c panel power already on\n",
port_name(dp_to_dig_port(intel_dp)->port)))
return;
}
 
wait_panel_power_cycle(intel_dp);
 
1312,8 → 1688,19
}
}
 
void intel_edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
return;
 
pps_lock(intel_dp);
edp_panel_on(intel_dp);
pps_unlock(intel_dp);
}
 
 
static void edp_panel_off(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
1322,12 → 1709,16
u32 pp;
u32 pp_ctrl_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("Turn eDP power off\n");
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
port_name(dp_to_dig_port(intel_dp)->port));
 
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
port_name(dp_to_dig_port(intel_dp)->port));
 
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
1350,8 → 1741,19
intel_display_power_put(dev_priv, power_domain);
}
 
void intel_edp_backlight_on(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
return;
 
pps_lock(intel_dp);
edp_panel_off(intel_dp);
pps_unlock(intel_dp);
}
 
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1358,13 → 1760,6
u32 pp;
u32 pp_ctrl_reg;
 
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
 
intel_panel_enable_backlight(intel_dp->attached_connector);
 
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
1372,6 → 1767,9
* allowing it to appear.
*/
wait_backlight_on(intel_dp);
 
pps_lock(intel_dp);
 
pp = ironlake_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
 
1379,10 → 1777,25
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
pps_unlock(intel_dp);
}
 
void intel_edp_backlight_off(struct intel_dp *intel_dp)
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
 
intel_panel_enable_backlight(intel_dp->attached_connector);
_intel_edp_backlight_on(intel_dp);
}
 
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
1391,7 → 1804,8
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
pps_lock(intel_dp);
 
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
 
1399,13 → 1813,51
 
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
 
pps_unlock(intel_dp);
 
intel_dp->last_backlight_off = jiffies;
 
edp_wait_backlight_off(intel_dp);
}
 
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
return;
 
DRM_DEBUG_KMS("\n");
 
_intel_edp_backlight_off(intel_dp);
intel_panel_disable_backlight(intel_dp->attached_connector);
}
 
/*
* Hook for controlling the panel power control backlight through the bl_power
* sysfs attribute. Take care to handle multiple calls.
*/
static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
bool is_enabled;
 
pps_lock(intel_dp);
is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
pps_unlock(intel_dp);
 
if (is_enabled == enable)
return;
 
DRM_DEBUG_KMS("panel power control backlight %s\n",
enable ? "enable" : "disable");
 
if (enable)
_intel_edp_backlight_on(intel_dp);
else
_intel_edp_backlight_off(intel_dp);
}
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1469,8 → 1921,6
if (mode != DRM_MODE_DPMS_ON) {
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
if (ret != 1)
DRM_DEBUG_DRIVER("failed to write sink power state\n");
} else {
/*
* When turning on, we need to retry for 1ms to give the sink
1484,6 → 1934,10
msleep(1);
}
}
 
if (ret != 1)
DRM_DEBUG_KMS("failed to %s sink power state\n",
mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
 
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1497,7 → 1951,7
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(intel_dp->output_reg);
1530,7 → 1984,7
return true;
}
 
for_each_pipe(i) {
for_each_pipe(dev_priv, i) {
trans_dp = I915_READ(TRANS_DP_CTL(i));
if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
*pipe = i;
1629,370 → 2083,15
}
}
 
static bool is_edp_psr(struct intel_dp *intel_dp)
{
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_PSR(dev))
return false;
 
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
 
static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
 
/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP before programming the video DIP data buffer
registers for the DIP being updated. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
 
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}
 
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
 
static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
}
 
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
bool only_standby = false;
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
 
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}
 
static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
dev_priv->psr.source_ok = false;
 
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
 
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
 
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
 
out:
dev_priv->psr.source_ok = true;
return true;
}
 
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
 
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
 
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
 
dev_priv->psr.active = true;
}
 
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
 
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
dev_priv->psr.busy_frontbuffer_bits = 0;
 
/* Setup PSR once */
intel_edp_psr_setup(intel_dp);
 
if (intel_edp_psr_match_conditions(intel_dp))
dev_priv->psr.enabled = intel_dp;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
 
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
cancel_delayed_work_sync(&dev_priv->psr.work);
}
 
static void intel_edp_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
 
if (!intel_dp)
goto unlock;
 
/*
* The delayed work can race with an invalidate hence we need to
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
 
intel_edp_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
static void intel_edp_psr_do_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
 
dev_priv->psr.active = false;
}
 
}
 
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
intel_edp_psr_do_exit(dev);
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
/*
* On Haswell, sprite plane updates don't result in a PSR invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_edp_psr_do_exit(dev);
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
 
void intel_edp_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
mutex_init(&dev_priv->psr.lock);
}
 
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
if (crtc->config.has_audio)
intel_audio_codec_disable(encoder);
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
intel_edp_panel_vdd_on(intel_dp);
2000,20 → 2099,18
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
intel_edp_panel_off(intel_dp);
 
/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
/* disable the port before the pipe on g4x */
if (INTEL_INFO(dev)->gen < 5)
intel_dp_link_down(intel_dp);
}
 
static void g4x_post_disable_dp(struct intel_encoder *encoder)
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (port != PORT_A)
return;
 
intel_dp_link_down(intel_dp);
if (port == PORT_A)
ironlake_edp_pll_off(intel_dp);
}
 
2060,24 → 2157,151
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t *DP,
uint8_t dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
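	/*
	 * Translate the generic DP training pattern into the platform
	 * specific register layout: DDI platforms program DP_TP_CTL, CPT PCH
	 * ports use the *_CPT link training bits, and everything else uses
	 * the legacy bits in the DP port register itself (with a CHV
	 * specific mask/PAT3 variant).
	 */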
 
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
 
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
else
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
break;
case DP_TRAINING_PATTERN_1:
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
break;
case DP_TRAINING_PATTERN_2:
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
break;
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
}
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
 
} else {
if (IS_CHERRYVIEW(dev))
*DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
*DP &= ~DP_LINK_TRAIN_MASK;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
if (IS_CHERRYVIEW(dev)) {
*DP |= DP_LINK_TRAIN_PAT_3_CHV;
} else {
DRM_ERROR("DP training pattern 3 not supported\n");
*DP |= DP_LINK_TRAIN_PAT_2;
}
break;
}
}
}
 
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* enable with pattern 1 (as per spec) */
_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
DP_TRAINING_PATTERN_1);
 
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
 
/*
* Magic for VLV/CHV. We _must_ first set up the register
* without actually enabling the port, and then do another
* write to enable the port. Otherwise link training will
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
 
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
}
 
static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
 
intel_edp_panel_vdd_on(intel_dp);
pps_lock(intel_dp);
 
if (IS_VALLEYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
 
intel_dp_enable_port(intel_dp);
 
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
 
pps_unlock(intel_dp);
 
if (IS_VALLEYVIEW(dev))
vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
 
if (crtc->config.has_audio) {
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder);
}
}
 
static void g4x_enable_dp(struct intel_encoder *encoder)
{
2108,6 → 2332,110
}
}
 
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
enum pipe pipe = intel_dp->pps_pipe;
int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
 
edp_panel_vdd_off_sync(intel_dp);
 
/*
* VLV seems to get confused when multiple power sequencers
* have the same port selected (even if only one has power/vdd
* enabled). The failure manifests as vlv_wait_port_ready() failing.
* CHV, on the other hand, doesn't seem to mind having the same port
* selected in multiple power sequencers, but let's always clear the
* port select when logically disconnecting a power sequencer
* from a port.
*/
DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(intel_dig_port->port));
I915_WRITE(pp_on_reg, 0);
POSTING_READ(pp_on_reg);
 
intel_dp->pps_pipe = INVALID_PIPE;
}
 
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
struct intel_dp *intel_dp;
enum port port;
 
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
 
intel_dp = enc_to_intel_dp(&encoder->base);
port = dp_to_dig_port(intel_dp)->port;
 
if (intel_dp->pps_pipe != pipe)
continue;
 
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
 
WARN(encoder->connectors_active,
"stealing pipe %c power sequencer from active eDP port %c\n",
pipe_name(pipe), port_name(port));
 
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
}
}
 
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!is_edp(intel_dp))
return;
 
if (intel_dp->pps_pipe == crtc->pipe)
return;
 
/*
* If another power sequencer was previously being used on this
* port, make sure to turn off vdd there while
* we still have control of it.
*/
if (intel_dp->pps_pipe != INVALID_PIPE)
vlv_detach_power_sequencer(intel_dp);
 
/*
* We may be stealing the power
* sequencer from another port.
*/
vlv_steal_power_sequencer(dev, crtc->pipe);
 
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
 
DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
 
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
 
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2117,7 → 2445,6
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum dpio_channel port = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
struct edp_power_seq power_seq;
u32 val;
 
mutex_lock(&dev_priv->dpio_lock);
2135,16 → 2462,7
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
}
 
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2183,7 → 2501,6
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq power_seq;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
2193,6 → 2510,15
 
mutex_lock(&dev_priv->dpio_lock);
 
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
2228,16 → 2554,7
 
mutex_unlock(&dev_priv->dpio_lock);
 
if (is_edp(intel_dp)) {
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
}
 
intel_enable_dp(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
}
 
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2251,6 → 2568,8
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
intel_dp_prepare(encoder);
 
mutex_lock(&dev_priv->dpio_lock);
 
/* program left/right clock distribution */
2318,6 → 2637,13
ssize_t ret;
int i;
 
/*
* Sometimes we just get the same incorrect byte repeated
* over the entire buffer. Doing just one throwaway read
* initially seems to "solve" it.
*/
drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
 
for (i = 0; i < 3; i++) {
ret = drm_dp_dpcd_read(aux, offset, buffer, size);
if (ret == size)
2348,14 → 2674,16
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
 
if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
if (INTEL_INFO(dev)->gen >= 9)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
else if (HAS_PCH_CPT(dev) && port != PORT_A)
return DP_TRAIN_VOLTAGE_SWING_1200;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else
return DP_TRAIN_VOLTAGE_SWING_800;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
 
static uint8_t
2364,51 → 2692,62
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
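	/*
	 * Roughly speaking, the maximum pre-emphasis level shrinks as the
	 * requested voltage swing level grows (per the DP spec the two are
	 * traded off against each other), with the platform specific limits
	 * below layered on top of that.
	 */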
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
default:
return DP_TRAIN_PRE_EMPHASIS_0;
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_3;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else if (IS_GEN7(dev) && port == PORT_A) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
default:
return DP_TRAIN_PRE_EMPHASIS_0;
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
} else {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
return DP_TRAIN_PRE_EMPH_LEVEL_2;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
return DP_TRAIN_PRE_EMPH_LEVEL_1;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
return DP_TRAIN_PRE_EMPH_LEVEL_0;
}
}
}
2427,22 → 2766,22
int pipe = intel_crtc->pipe;
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_PRE_EMPH_LEVEL_0:
preemph_reg_value = 0x0004000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x552AB83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5548B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
demph_reg_value = 0x2B245555;
uniqtranscale_reg_value = 0x5560B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
demph_reg_value = 0x2B405555;
uniqtranscale_reg_value = 0x5598DA3A;
break;
2450,18 → 2789,18
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_PRE_EMPH_LEVEL_1:
preemph_reg_value = 0x0002000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x5552B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
demph_reg_value = 0x2B404848;
uniqtranscale_reg_value = 0x5580B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
demph_reg_value = 0x2B404040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
2469,14 → 2808,14
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_PRE_EMPH_LEVEL_2:
preemph_reg_value = 0x0000000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
demph_reg_value = 0x2B305555;
uniqtranscale_reg_value = 0x5570B83A;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
demph_reg_value = 0x2B2B4040;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
2484,10 → 2823,10
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
case DP_TRAIN_PRE_EMPH_LEVEL_3:
preemph_reg_value = 0x0006000;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
demph_reg_value = 0x1B405555;
uniqtranscale_reg_value = 0x55ADDA3A;
break;
2526,21 → 2865,21
int i;
 
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_PRE_EMPH_LEVEL_0:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
deemph_reg_value = 128;
margin_reg_value = 52;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
deemph_reg_value = 128;
margin_reg_value = 77;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
deemph_reg_value = 128;
margin_reg_value = 102;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
deemph_reg_value = 128;
margin_reg_value = 154;
/* FIXME extra to set for 1200 */
2549,17 → 2888,17
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_PRE_EMPH_LEVEL_1:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
deemph_reg_value = 85;
margin_reg_value = 78;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
deemph_reg_value = 85;
margin_reg_value = 116;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
deemph_reg_value = 85;
margin_reg_value = 154;
break;
2567,13 → 2906,13
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_PRE_EMPH_LEVEL_2:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
deemph_reg_value = 64;
margin_reg_value = 104;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
deemph_reg_value = 64;
margin_reg_value = 154;
break;
2581,9 → 2920,9
return 0;
}
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
case DP_TRAIN_PRE_EMPH_LEVEL_3:
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
deemph_reg_value = 43;
margin_reg_value = 154;
break;
2600,12 → 2939,26
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
 
/* Program swing deemph */
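	/* the same de-emphasis value is written to all four TX lanes of the channel */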
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2617,8 → 2970,8
/* Program swing margin */
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT;
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
2630,9 → 2983,9
}
 
if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
== DP_TRAIN_PRE_EMPHASIS_0) &&
== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
== DP_TRAIN_VOLTAGE_SWING_1200)) {
== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
 
/*
* The document said it needs to set bit 27 for ch0 and bit 26
2711,32 → 3064,32
uint32_t signal_levels = 0;
 
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
default:
signal_levels |= DP_VOLTAGE_0_4;
break;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
signal_levels |= DP_VOLTAGE_0_6;
break;
case DP_TRAIN_VOLTAGE_SWING_800:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
signal_levels |= DP_VOLTAGE_0_8;
break;
case DP_TRAIN_VOLTAGE_SWING_1200:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
signal_levels |= DP_VOLTAGE_1_2;
break;
}
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_PRE_EMPH_LEVEL_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
break;
case DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_PRE_EMPH_LEVEL_1:
signal_levels |= DP_PRE_EMPHASIS_3_5;
break;
case DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_PRE_EMPH_LEVEL_2:
signal_levels |= DP_PRE_EMPHASIS_6;
break;
case DP_TRAIN_PRE_EMPHASIS_9_5:
case DP_TRAIN_PRE_EMPH_LEVEL_3:
signal_levels |= DP_PRE_EMPHASIS_9_5;
break;
}
2750,19 → 3103,19
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2778,21 → 3131,21
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_600MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return EDP_LINK_TRAIN_800MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
 
default:
2809,30 → 3162,30
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_400MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_400MV_3_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_400MV_6DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
return DDI_BUF_EMP_400MV_9_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(0);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(1);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return DDI_BUF_TRANS_SELECT(2);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
return DDI_BUF_TRANS_SELECT(3);
 
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_600MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_600MV_3_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_600MV_6DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(4);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(5);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
return DDI_BUF_TRANS_SELECT(6);
 
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_800MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_800MV_3_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
return DDI_BUF_TRANS_SELECT(7);
case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
return DDI_BUF_TRANS_SELECT(8);
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_EMP_400MV_0DB_HSW;
return DDI_BUF_TRANS_SELECT(0);
}
}
 
2846,7 → 3199,7
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
2879,75 → 3232,11
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
uint8_t buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
 
if (HAS_DDI(dev)) {
uint32_t temp = I915_READ(DP_TP_CTL(port));
_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
 
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
else
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
break;
case DP_TRAINING_PATTERN_1:
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
break;
case DP_TRAINING_PATTERN_2:
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
break;
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
}
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
*DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
 
} else {
*DP &= ~DP_LINK_TRAIN_MASK;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
*DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
*DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
}
 
I915_WRITE(intel_dp->output_reg, *DP);
POSTING_READ(intel_dp->output_reg);
 
3174,7 → 3463,6
 
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp_link_down(intel_dp);
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
3230,6 → 3518,9
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
if (IS_CHERRYVIEW(dev))
DP &= ~DP_LINK_TRAIN_MASK_CHV;
else
DP &= ~DP_LINK_TRAIN_MASK;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
}
3276,15 → 3567,11
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
 
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
 
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
3301,11 → 3588,12
}
}
 
/* Training Pattern 3 support */
/* Training Pattern 3 support, both source and sink */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) {
intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
(IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
intel_dp->use_tps3 = true;
DRM_DEBUG_KMS("Displayport TPS3 supported");
DRM_DEBUG_KMS("Displayport TPS3 supported\n");
} else
intel_dp->use_tps3 = false;
 
3332,8 → 3620,6
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
intel_edp_panel_vdd_on(intel_dp);
 
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
3341,8 → 3627,6
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
edp_panel_vdd_off(intel_dp, false);
}
 
static bool
3356,7 → 3640,6
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
 
_edp_panel_vdd_on(intel_dp);
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
3366,7 → 3649,6
intel_dp->is_mst = false;
}
}
edp_panel_vdd_off(intel_dp, false);
 
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
return intel_dp->is_mst;
3378,26 → 3660,48
struct drm_device *dev = intel_dig_port->base.base.dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
u8 buf[1];
u8 buf;
int test_crc_count;
int attempts = 6;
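	/*
	 * Sink CRC sequence, as implemented below: check TEST_SINK_MISC for
	 * CRC support, start the test via TEST_SINK, latch the current
	 * TEST_COUNT, poll (two vblanks per attempt, up to six attempts)
	 * until the count changes, read the six CRC bytes starting at
	 * TEST_CRC_R_CR, then stop the test again.
	 */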
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0)
return -EAGAIN;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
 
if (!(buf[0] & DP_TEST_CRC_SUPPORTED))
if (!(buf & DP_TEST_CRC_SUPPORTED))
return -ENOTTY;
 
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
 
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
DP_TEST_SINK_START) < 0)
return -EAGAIN;
buf | DP_TEST_SINK_START) < 0)
return -EIO;
 
/* Wait 2 vblanks to be sure we will have the correct CRC value */
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
test_crc_count = buf & DP_TEST_COUNT_MASK;
 
do {
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_TEST_SINK_MISC, &buf) < 0)
return -EIO;
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
} while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
 
if (attempts == 0) {
DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
return -ETIMEDOUT;
}
 
if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
return -EAGAIN;
return -EIO;
 
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0);
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
return -EIO;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
buf & ~DP_TEST_SINK_START) < 0)
return -EIO;
 
return 0;
}
 
3598,21 → 3902,25
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
edp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
 
return status;
}
 
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
 
3687,9 → 3995,9
}
 
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
intel_dp_get_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_connector *intel_connector = intel_dp->attached_connector;
 
/* use cached edid if we have one */
if (intel_connector->edid) {
3698,29 → 4006,57
return NULL;
 
return drm_edid_duplicate(intel_connector->edid);
} else
return drm_get_edid(&intel_connector->base,
&intel_dp->aux.ddc);
}
 
return drm_get_edid(connector, adapter);
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct edid *edid;
 
edid = intel_dp_get_edid(intel_dp);
intel_connector->detect_edid = edid;
 
if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
else
intel_dp->has_audio = drm_detect_monitor_audio(edid);
}
 
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_connector *intel_connector = intel_dp->attached_connector;
 
/* use cached edid if we have one */
if (intel_connector->edid) {
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return 0;
kfree(intel_connector->detect_edid);
intel_connector->detect_edid = NULL;
 
return intel_connector_update_modes(connector,
intel_connector->edid);
intel_dp->has_audio = false;
}
 
return intel_ddc_get_modes(connector, adapter);
static enum intel_display_power_domain
intel_dp_power_get(struct intel_dp *dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
enum intel_display_power_domain power_domain;
 
power_domain = intel_display_port_power_domain(encoder);
intel_display_power_get(to_i915(encoder->base.dev), power_domain);
 
return power_domain;
}
 
static void
intel_dp_power_put(struct intel_dp *dp,
enum intel_display_power_domain power_domain)
{
struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
intel_display_power_put(to_i915(encoder->base.dev), power_domain);
}
 
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
3728,33 → 4064,30
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
bool ret;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
 
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_disconnected;
goto out;
return connector_status_disconnected;
}
 
intel_dp->has_audio = false;
power_domain = intel_dp_power_get(intel_dp);
 
if (HAS_PCH_SPLIT(dev))
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (HAS_PCH_SPLIT(dev))
status = ironlake_dp_detect(intel_dp);
else
status = g4x_dp_detect(intel_dp);
 
if (status != connector_status_connected)
goto out;
 
3770,15 → 4103,7
goto out;
}
 
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
}
intel_dp_set_edid(intel_dp);
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3785,36 → 4110,52
status = connector_status_connected;
 
out:
intel_display_power_put(dev_priv, power_domain);
intel_dp_power_put(intel_dp, power_domain);
return status;
}
 
static int intel_dp_get_modes(struct drm_connector *connector)
static void
intel_dp_force(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
enum intel_display_power_domain power_domain;
int ret;
 
/* We should parse the EDID data and find out if it has an audio sink
*/
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
if (connector->status != connector_status_connected)
return;
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->aux.ddc);
intel_display_power_put(dev_priv, power_domain);
power_domain = intel_dp_power_get(intel_dp);
 
intel_dp_set_edid(intel_dp);
 
intel_dp_power_put(intel_dp, power_domain);
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
 
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct edid *edid;
 
edid = intel_connector->detect_edid;
if (edid) {
int ret = intel_connector_update_modes(connector, edid);
if (ret)
return ret;
}
 
/* if eDP has no EDID, fall back to fixed mode */
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
if (is_edp(intel_attached_dp(connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev,
 
mode = drm_mode_duplicate(connector->dev,
intel_connector->panel.fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
3821,6 → 4162,7
return 1;
}
}
 
return 0;
}
 
3827,26 → 4169,13
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
bool has_audio = false;
struct edid *edid;
bool has_audio = false;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edid = intel_dp_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
edid = to_intel_connector(connector)->detect_edid;
if (edid)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
 
intel_display_power_put(dev_priv, power_domain);
 
return has_audio;
}
 
3943,6 → 4272,8
{
struct intel_connector *intel_connector = to_intel_connector(connector);
 
kfree(intel_connector->detect_edid);
 
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
 
3959,7 → 4290,6
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
drm_dp_aux_unregister(&intel_dp->aux);
intel_dp_mst_encoder_cleanup(intel_dig_port);
3966,9 → 4296,14
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
pps_unlock(intel_dp);
 
}
kfree(intel_dig_port);
}
3980,17 → 4315,68
if (!is_edp(intel_dp))
return;
 
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
pps_unlock(intel_dp);
}
 
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (!edp_have_panel_vdd(intel_dp))
return;
 
/*
* The VDD bit needs a power domain reference, so if the bit is
* already enabled when we boot or resume, grab this reference and
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_power_domain(&intel_dig_port->base);
intel_display_power_get(dev_priv, power_domain);
 
edp_panel_vdd_schedule_off(intel_dp);
}
 
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
struct intel_dp *intel_dp;
 
if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(encoder);
 
pps_lock(intel_dp);
 
/*
* Read out the current power sequencer assignment,
* in case the BIOS did something with it.
*/
if (IS_VALLEYVIEW(encoder->dev))
vlv_initial_power_sequencer_setup(intel_dp);
 
intel_edp_panel_vdd_sanitize(intel_dp);
 
pps_unlock(intel_dp);
}
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
.destroy = intel_dp_connector_destroy,
4026,7 → 4412,20
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
 
DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
* vdd off can generate a long pulse on eDP which
* would require vdd on to handle it, and thus we
* would end up in an endless cycle of
* "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
*/
DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
port_name(intel_dig_port->port));
return false;
}
 
DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
port_name(intel_dig_port->port),
long_hpd ? "long" : "short");
 
power_domain = intel_display_port_power_domain(intel_encoder);
4158,14 → 4557,20
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out)
struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps_delays;
u32 pp_on, pp_off, pp_div, pp;
int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
/* already initialized? */
if (final->t11_t12 != 0)
return;
 
if (HAS_PCH_SPLIT(dev)) {
pp_ctrl_reg = PCH_PP_CONTROL;
pp_on_reg = PCH_PP_ON_DELAYS;
4227,7 → 4632,7
 
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
4237,7 → 4642,7
assign_final(t11_t12);
#undef assign_final
 
#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
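/*
* Editorial note with hypothetical values, not part of this change: the raw
* delay fields appear to be in units of 100 us, so if e.g. cur.t1_t3 = 0 and
* vbt.t1_t3 = 350, assign_final() picks 350 and get_delay() then yields a
* panel_power_up_delay of DIV_ROUND_UP(350, 10) = 35 ms.
*/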
4251,21 → 4656,21
 
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
if (out)
*out = final;
}
 
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *seq)
struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
int pp_on_reg, pp_off_reg, pp_div_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
 
lockdep_assert_held(&dev_priv->pps_mutex);
 
if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
4299,12 → 4704,9
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_B)
port_sel = PANEL_PORT_SELECT_DPB_VLV;
else
port_sel = PANEL_PORT_SELECT_DPC_VLV;
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (dp_to_dig_port(intel_dp)->port == PORT_A)
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
port_sel = PANEL_PORT_SELECT_DPD;
4348,7 → 4750,7
* hard to tell without seeing the user of this function of this code.
* Check locking and ordering once that lands.
*/
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
}
4388,7 → 4790,7
val = I915_READ(reg);
if (index > DRRS_HIGH_RR) {
val |= PIPECONF_EDP_RR_MODE_SWITCH;
intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
intel_dp_set_m_n(intel_crtc);
} else {
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
4428,7 → 4830,7
}
 
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
DRM_INFO("VBT doesn't support DRRS\n");
DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
return NULL;
}
 
4436,7 → 4838,7
(dev, fixed_mode, connector);
 
if (!downclock_mode) {
DRM_INFO("DRRS not supported\n");
DRM_DEBUG_KMS("DRRS not supported\n");
return NULL;
}
 
4447,39 → 4849,12
intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;
 
intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
DRM_INFO("seamless DRRS supported for eDP panel.\n");
DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
 
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp;
enum intel_display_power_domain power_domain;
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
return;
 
intel_dp = enc_to_intel_dp(&intel_encoder->base);
if (!edp_have_panel_vdd(intel_dp))
return;
/*
* The VDD bit needs a power domain reference, so if the bit is
* already enabled when we boot or resume, grab this reference and
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edp_panel_vdd_schedule_off(intel_dp);
}
 
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4491,6 → 4866,7
bool has_dpcd;
struct drm_display_mode *scan;
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
 
intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
 
4497,12 → 4873,12
if (!is_edp(intel_dp))
return true;
 
intel_edp_panel_vdd_sanitize(intel_encoder);
pps_lock(intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
pps_unlock(intel_dp);
 
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
edp_panel_vdd_off(intel_dp, false);
 
if (has_dpcd) {
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4516,7 → 4892,9
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);
pps_lock(intel_dp);
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
pps_unlock(intel_dp);
 
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
4554,8 → 4932,31
}
mutex_unlock(&dev->mode_config.mutex);
 
if (IS_VALLEYVIEW(dev)) {
 
/*
* Figure out the current pipe for the initial backlight setup.
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
if (IS_CHERRYVIEW(dev))
pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
else
pipe = PORT_TO_PIPE(intel_dp->DP);
 
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = intel_dp->pps_pipe;
 
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
 
DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
pipe_name(pipe));
}
 
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
 
return true;
}
4570,11 → 4971,14
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
struct edp_power_seq power_seq = { 0 };
int type;
 
intel_dp->pps_pipe = INVALID_PIPE;
 
/* intel_dp vfuncs */
if (IS_VALLEYVIEW(dev))
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_VALLEYVIEW(dev))
intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
4583,6 → 4987,9
else
intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
 
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
 
/* Preserve the current hw state. */
4602,6 → 5009,11
if (type == DRM_MODE_CONNECTOR_eDP)
intel_encoder->type = INTEL_OUTPUT_EDP;
 
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
port != PORT_B && port != PORT_C))
return false;
 
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
port_name(port));
4643,8 → 5055,13
}
 
if (is_edp(intel_dp)) {
pps_lock(intel_dp);
intel_dp_init_panel_power_timestamps(intel_dp);
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
if (IS_VALLEYVIEW(dev))
vlv_initial_power_sequencer_setup(intel_dp);
else
intel_dp_init_panel_power_sequencer(dev, intel_dp);
pps_unlock(intel_dp);
}
 
intel_dp_aux_init(intel_dp, intel_connector);
4652,17 → 5069,22
/* init MST on ports that can support it */
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (port == PORT_B || port == PORT_C || port == PORT_D) {
intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
}
}
 
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
drm_dp_aux_unregister(&intel_dp->aux);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
pps_lock(intel_dp);
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
pps_unlock(intel_dp);
}
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
4726,7 → 5148,8
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
if (INTEL_INFO(dev)->gen >= 5)
intel_encoder->post_disable = ilk_post_disable_dp;
}
 
intel_dig_port->port = port;
/drivers/video/drm/i915/intel_dp_mst.c
278,22 → 278,14
}
 
static enum drm_connector_status
intel_mst_port_dp_detect(struct drm_connector *connector)
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
 
return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
 
static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
status = intel_mst_port_dp_detect(connector);
return status;
}
 
static int
intel_dp_mst_set_property(struct drm_connector *connector,
struct drm_property *property,
393,7 → 385,7
#endif
}
 
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
422,6 → 414,8
intel_dp_add_properties(intel_dp, connector);
 
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
/drivers/video/drm/i915/intel_drv.h
25,6 → 25,7
#ifndef __INTEL_DRV_H__
#define __INTEL_DRV_H__
 
#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <drm/i915_drm.h>
33,12 → 34,11
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
 
#define KBUILD_MODNAME "i915.dll"
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
 
 
#define cpu_relax() asm volatile("rep; nop")
 
/**
* _wait_for - magic (register) wait macro
*
94,18 → 94,20
 
/* these are outputs from the chip - integrated only
external chips are via DVO or SDVO output */
#define INTEL_OUTPUT_UNUSED 0
#define INTEL_OUTPUT_ANALOG 1
#define INTEL_OUTPUT_DVO 2
#define INTEL_OUTPUT_SDVO 3
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_OUTPUT_DP_MST 11
enum intel_output_type {
INTEL_OUTPUT_UNUSED = 0,
INTEL_OUTPUT_ANALOG = 1,
INTEL_OUTPUT_DVO = 2,
INTEL_OUTPUT_SDVO = 3,
INTEL_OUTPUT_LVDS = 4,
INTEL_OUTPUT_TVOUT = 5,
INTEL_OUTPUT_HDMI = 6,
INTEL_OUTPUT_DISPLAYPORT = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
INTEL_OUTPUT_UNKNOWN = 10,
INTEL_OUTPUT_DP_MST = 11,
};
 
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
136,7 → 138,7
*/
struct intel_crtc *new_crtc;
 
int type;
enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
184,6 → 186,8
bool active_low_pwm;
struct backlight_device *device;
} backlight;
 
void (*backlight_power)(struct intel_connector *, bool enable);
};
 
struct intel_connector {
216,6 → 220,7
 
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid;
struct edid *detect_edid;
 
/* since POLL and HPD connectors may use the same HPD line, keep the native
state of connector->polled in case hotplug storm detection changes it */
238,6 → 243,17
int p;
} intel_clock_t;
 
struct intel_plane_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
struct drm_rect orig_src;
struct drm_rect orig_dst;
bool visible;
};
 
struct intel_plane_config {
bool tiled;
int size;
276,6 → 292,9
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
 
/* Are we sending infoframes on the attached port */
bool has_infoframe;
 
/* CPU Transcoder for the pipe. Currently this can only differ from the
* pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
324,7 → 343,10
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
 
/* PORT_CLK_SEL for DDI ports. */
/*
* - PORT_CLK_SEL for DDI ports on HSW/BDW.
* - enum skl_dpll on SKL
*/
uint32_t ddi_pll_sel;
 
/* Actual register state of the dpll, for shared dpll cross-checking. */
335,6 → 357,7
 
/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
 
/*
* Frequency the dpll for the port should run at. Differs from the
384,9 → 407,16
 
struct intel_mmio_flip {
u32 seqno;
u32 ring_id;
struct intel_engine_cs *ring;
struct work_struct work;
};
 
struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
};
 
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
415,6 → 445,7
uint32_t cursor_addr;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;
 
struct intel_plane_config plane_config;
433,11 → 464,12
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
} wm;
 
wait_queue_head_t vbl_wait;
 
int scanline_offset;
struct intel_mmio_flip mmio_flip;
};
 
struct intel_plane_wm_parameters {
459,6 → 491,7
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned int rotation;
 
/* Since we need to change the watermarks before/after
* enabling/disabling the planes, we need to store the parameters here
525,6 → 558,7
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
};
 
struct intel_dp_mst_encoder;
567,6 → 601,13
unsigned long last_power_on;
unsigned long last_backlight_off;
 
/*
* Pipe whose power sequencer is currently locked into
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
struct edp_power_seq pps_delays;
 
bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
665,6 → 706,10
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct intel_engine_cs *flip_queued_ring;
u32 flip_queued_seqno;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
};
 
718,21 → 763,37
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
 
/*
* Returns the number of planes for this pipe, i.e. the number of sprites + 1
* (the primary plane). The cursor plane is not counted.
*/
static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
{
return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
}
 
/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
 
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
739,11 → 800,10
* We only use drm_irq_uninstall() at unload and VT switch, so
* this is the only thing we need to check.
*/
return !dev_priv->pm._irqs_disabled;
return dev_priv->pm.irqs_enabled;
}
 
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
 
/* intel_crt.c */
776,11 → 836,7
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
790,7 → 846,7
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
* intel_frontbuffer_flip - prepare frontbuffer flip
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
808,6 → 864,18
}
 
void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
 
 
/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
 
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
828,8 → 896,12
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
drm_wait_one_vblank(dev, pipe);
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
839,8 → 911,8
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
850,6 → 922,7
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
 
/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
861,7 → 934,13
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);
 
void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
 
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
873,17 → 952,17
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
891,7 → 970,6
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
898,8 → 976,8
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
 
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
919,24 → 997,18
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_init(struct drm_device *dev);
 
int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
 
/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
951,9 → 1023,9
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_I915_FBDEV
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
962,7 → 1034,7
return 0;
}
 
static inline void intel_fbdev_initial_config(struct drm_device *dev)
static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
}
 
970,7 → 1042,7
{
}
 
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}
 
1026,7 → 1098,7
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
1036,7 → 1108,42
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
void intel_backlight_register(struct drm_device *dev);
void intel_backlight_unregister(struct drm_device *dev);
 
 
/* intel_psr.c */
bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_init(struct drm_device *dev);
 
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
 
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
 
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
 
/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
1054,17 → 1161,6
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
1075,14 → 1171,10
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
 
 
/* intel_sdvo.c */
1093,14 → 1185,19
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_plane_restore(struct drm_plane *plane);
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
 
 
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
 
/drivers/video/drm/i915/intel_dsi.c
344,7 → 344,7
DRM_DEBUG_KMS("\n");
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
/* XXX: this only works for one DSI output */
423,9 → 423,11
}
 
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
u16 burst_mode_ratio)
{
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
8 * 100), lane_count);
}
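/*
* Editorial sketch, not part of this change: a worked example of the new
* txbyteclkhs() formula with hypothetical values. Assuming 1920 active
* pixels, 24 bpp (RGB888), 4 lanes and burst_mode_ratio = 100:
*
*	inner = DIV_ROUND_UP(1920 * 24 * 100, 8 * 100) = 5760 bytes
*	result = DIV_ROUND_UP(5760, 4) = 1440 byte clocks per lane
*
* i.e. with a ratio of 100 (no burst) the result matches the old
* DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count) computation.
*/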
 
static void set_dsi_timings(struct drm_encoder *encoder,
451,10 → 453,12
vbp = mode->vtotal - mode->vsync_end;
 
/* horizontal values are in terms of high speed byte clock */
hactive = txbyteclkhs(hactive, bpp, lane_count);
hfp = txbyteclkhs(hfp, bpp, lane_count);
hsync = txbyteclkhs(hsync, bpp, lane_count);
hbp = txbyteclkhs(hbp, bpp, lane_count);
hactive = txbyteclkhs(hactive, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
hsync = txbyteclkhs(hsync, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
 
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
541,12 → 545,14
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count) + 1);
intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
bpp, intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);
/drivers/video/drm/i915/intel_dsi.h
116,6 → 116,8
u16 clk_hs_to_lp_count;
 
u16 init_count;
u32 pclk;
u16 burst_mode_ratio;
 
/* all delays in ms */
u16 backlight_off_delay;
/drivers/video/drm/i915/intel_dsi_panel_vbt.c
271,6 → 271,8
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;
 
DRM_DEBUG_KMS("\n");
 
284,8 → 286,6
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;
 
bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;
 
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
297,6 → 297,40
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
 
pclk = mode->clock;
 
/* Burst Mode Ratio:
* target DDR frequency from VBT / non-burst DDR frequency,
* multiplied by 100 to preserve the remainder
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
computed_ddr =
(pclk * bits_per_pixel) / intel_dsi->lane_count;
 
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
}
 
burst_mode_ratio = DIV_ROUND_UP(
mipi_config->target_burst_mode_freq * 100,
computed_ddr);
 
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
}
} else
burst_mode_ratio = 100;
 
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;
 
bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
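/*
* Editorial sketch with hypothetical numbers, not part of this change:
* for a 148500 kHz pixel clock, 24 bpp and 4 lanes, computed_ddr is
* 148500 * 24 / 4 = 891000. With a VBT target burst frequency of 1000000,
* burst_mode_ratio = DIV_ROUND_UP(1000000 * 100, 891000) = 113, the
* adjusted pclk becomes DIV_ROUND_UP(148500 * 113, 100) = 167805 and
* bitrate = 167805 * 24 / 4 = 1006830.
*/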
 
switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;
/drivers/video/drm/i915/intel_dsi_pll.c
134,8 → 134,7
#else
 
/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
int pixel_format, int lane_count)
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
156,7 → 155,7
 
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);
 
return dsi_clk_khz;
}
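/*
* Editorial sketch, not part of this change: with hypothetical values
* pclk = 167805 kHz, 4 lanes, and bpp resolving to 24 for RGB888, this
* gives DIV_ROUND_CLOSEST(167805 * 24, 4) = 1006830 kHz for the DSI
* clock, matching the bitrate computed in intel_dsi_panel_vbt.c above.
*/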
228,14 → 227,12
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
 
dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
 
ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
/drivers/video/drm/i915/intel_dvo.c
85,7 → 85,7
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOC,
.dvo_reg = DVOB,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
185,12 → 185,13
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
&crtc->config.adjusted_mode);
 
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
 
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
 
226,10 → 227,6
 
intel_crtc_update_dpms(crtc);
 
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&config->requested_mode,
&config->adjusted_mode);
 
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
/drivers/video/drm/i915/intel_fbdev.c
24,6 → 24,7
* David Airlie
*/
 
#include <linux/async.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
31,7 → 32,7
//#include <linux/mm.h>
//#include <linux/tty.h>
#include <linux/sysrq.h>
//#include <linux/delay.h>
#include <linux/delay.h>
#include <linux/fb.h>
//#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
70,11 → 71,36
#undef BYTES_PER_LONG
}
 
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;
 
ret = drm_fb_helper_set_par(info);
 
if (ret == 0) {
/*
* FIXME: fbdev presumes that all callbacks also work from
* atomic contexts and relies on that for emergency oops
* printing. KMS totally doesn't do that and the locking here is
* by far not the only place this goes wrong. Ignore this for
* now until we solve this for real.
*/
mutex_lock(&fb_helper->dev->struct_mutex);
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
true);
mutex_unlock(&fb_helper->dev->struct_mutex);
}
 
return ret;
}
 
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_set_par = intel_fbdev_set_par,
// .fb_fillrect = cfb_fillrect,
// .fb_copyarea = cfb_copyarea,
// .fb_imageblit = cfb_imageblit,
103,8 → 129,8
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
 
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
8), 512);
mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
 
111,32 → 137,32
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = main_fb_obj;
obj->stride = mode_cmd.pitches[0];
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
obj->stride = mode_cmd.pitches[0];
 
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unref;
}
 
/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_unref;
goto out_fb;
}
 
fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unpin;
}
 
ifbdev->fb = to_intel_framebuffer(fb);
 
return 0;
 
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out_fb:
drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
302,6 → 328,7
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
310,25 → 337,9
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
uint64_t conn_configured = 0, mask;
int pass = 0;
 
/*
* If the user specified any force options, just bail here
* and use that config.
*/
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
 
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
 
if (!enabled[i])
continue;
 
if (connector->force != DRM_FORCE_UNSPECIFIED)
return false;
}
 
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
if (!save_enabled)
335,7 → 346,8
return false;
 
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
 
mask = (1 << fb_helper->connector_count) - 1;
retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
345,6 → 357,12
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
 
if (conn_configured & (1 << i))
continue;
 
if (pass == 0 && !connector->has_tile)
continue;
 
if (connector->status == connector_status_connected)
num_connectors_detected++;
 
351,14 → 369,26
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
conn_configured |= (1 << i);
continue;
}
 
if (connector->force == DRM_FORCE_OFF) {
DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
 
encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
 
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
conn_configured |= (1 << i);
continue;
}
 
374,8 → 404,7
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
fallback = true;
goto out;
goto bail;
}
}
 
387,8 → 416,8
 
/* try for preferred next */
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
connector->name);
DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
431,8 → 460,14
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
 
fallback = false;
conn_configured |= (1 << i);
}
 
if ((conn_configured & mask) != mask) {
pass++;
goto retry;
}
 
/*
* If the BIOS didn't enable everything it could, fall back to having the
* same user experience of lighting up as much as possible like the
446,8 → 481,8
fallback = true;
}
 
out:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
627,9 → 662,9
return 0;
}
 
void intel_fbdev_initial_config(struct drm_device *dev)
void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = data;
struct intel_fbdev *ifbdev = dev_priv->fbdev;
 
/* Due to peculiar init order wrt to hpd handling this is separate. */
/drivers/video/drm/i915/intel_fifo_underrun.c
0,0 → 1,381
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
*/
 
#include "i915_drv.h"
#include "intel_drv.h"
 
/**
* DOC: fifo underrun handling
*
* The i915 driver checks for display fifo underruns using the interrupt signals
* provided by the hardware. This is enabled by default and fairly useful to
* debug display issues, especially watermark settings.
*
* If an underrun is detected, this is logged into dmesg. To avoid flooding the
* logs and occupying the CPU, underrun interrupts are disabled after the first
* occurrence until the next modeset on a given pipe.
*
* Note that underrun detection on gmch platforms is a bit uglier since there
* is no interrupt (even though the signalling bit is in the PIPESTAT pipe
* interrupt register). Also, on some other platforms underrun interrupts are
* shared, which means that if we detect an underrun we need to disable underrun
* reporting on all pipes.
*
* The code also supports underrun detection on the PCH transcoder.
*/
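/*
* Illustrative usage only (editorial sketch, not part of this change): a
* modeset path that expects transient underruns around a pipe disable could
* suppress and later restore reporting roughly like this, where
* my_disable_pipe() stands in for whatever actually turns the pipe off:
*
*	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
*	my_disable_pipe(dev_priv, pipe);
*	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
*
* The return value is the previous reporting state, so a caller can also
* restore exactly the state that was there before.
*/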
 
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc;
enum pipe pipe;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->cpu_fifo_underrun_disabled)
return false;
}
 
return true;
}
 
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
struct intel_crtc *crtc;
 
assert_spin_locked(&dev_priv->irq_lock);
 
for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
if (crtc->pch_fifo_underrun_disabled)
return false;
}
 
return true;
}
 
/**
* i9xx_check_fifo_underruns - check for fifo underruns
* @dev_priv: i915 device instance
*
* This function checks for fifo underruns on GMCH platforms. This needs to be
* done manually on modeset to make sure that we catch all underruns since they
* do not generate an interrupt by themselves on these platforms.
*/
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc;
 
spin_lock_irq(&dev_priv->irq_lock);
 
for_each_intel_crtc(dev_priv->dev, crtc) {
u32 reg = PIPESTAT(crtc->pipe);
u32 pipestat;
 
if (crtc->cpu_fifo_underrun_disabled)
continue;
 
pipestat = I915_READ(reg) & 0xffff0000;
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
continue;
 
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
 
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
 
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable) {
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
} else {
if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
}
}
 
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
DE_PIPEB_FIFO_UNDERRUN;
 
if (enable)
ironlake_enable_display_irq(dev_priv, bit);
else
ironlake_disable_display_irq(dev_priv, bit);
}
 
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
 
if (!ivb_can_enable_err_int(dev))
return;
 
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
 
if (old &&
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
DRM_ERROR("uncleared fifo underrun on pipe %c\n",
pipe_name(pipe));
}
}
}
 
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
assert_spin_locked(&dev_priv->irq_lock);
 
if (enable)
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
else
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}
 
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
else
ibx_disable_display_interrupt(dev_priv, bit);
}
 
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable, bool old)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (enable) {
I915_WRITE(SERR_INT,
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
 
if (!cpt_can_enable_serr_int(dev))
return;
 
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
 
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
transcoder_name(pch_transcoder));
}
}
}
 
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool old;
 
assert_spin_locked(&dev_priv->irq_lock);
 
old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable;
 
if (HAS_GMCH_DISPLAY(dev))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN8(dev) || IS_GEN9(dev))
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
 
return old;
}
 
/**
* intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe to set state for
* @enable: whether underruns should be reported or not
*
* This function sets the fifo underrun state for @pipe. It is used in the
* modeset code to avoid false positives since on many platforms underruns are
* expected when disabling or enabling the pipe.
*
* Notice that on some platforms disabling underrun reports for one pipe
* disables them for all pipes due to shared interrupts. Actual reporting is still per-pipe
* though.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable)
{
unsigned long flags;
bool ret;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return ret;
}
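/*
 * Hedged example of using the returned previous state: a caller can save
 * and restore reporting across a temporarily noisy operation instead of
 * unconditionally re-enabling it. example_noisy_op() is a placeholder.
 */
static void example_save_restore(struct drm_i915_private *dev_priv,
				 enum pipe pipe)
{
	bool old = intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	example_noisy_op(dev_priv, pipe);	/* hypothetical */

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, old);
}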
 
static bool
__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return !intel_crtc->cpu_fifo_underrun_disabled;
}
 
/**
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
* @dev_priv: i915 device instance
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
* @enable: whether underruns should be reported or not
*
* This function makes us disable or enable PCH fifo underruns for a specific
* PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
* underrun reporting for one transcoder may also disable all the other PCH
* error interrupts for the other transcoders, because there is just
* one interrupt mask/enable bit for all the transcoders.
*
* Returns the previous state of underrun reporting.
*/
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool old;
 
/*
* NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
* has only one pch transcoder A that all pipes can use. To avoid racy
* pch transcoder -> pipe lookups from interrupt code simply store the
* underrun statistics in crtc A. Since we never expose this anywhere
* nor use it outside of the fifo underrun code here using the "wrong"
* crtc on LPT won't cause issues.
*/
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
 
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
 
if (HAS_PCH_IBX(dev_priv->dev))
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable);
else
cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable, old);
 
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return old;
}
 
/**
* intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
* @dev_priv: i915 device instance
* @pipe: (CPU) pipe that generated the underrun interrupt
*
* This handles a CPU fifo underrun interrupt, generating an underrun warning
* into dmesg if underrun reporting is enabled and then disables the underrun
* interrupt to avoid an irq storm.
*/
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
/* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
!__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
return;
 
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
}
 
/**
* intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
* @dev_priv: i915 device instance
* @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
*
* This handles a PCH fifo underrun interrupt, generating an underrun warning
* into dmesg if underrun reporting is enabled and then disables the underrun
* interrupt to avoid an irq storm.
*/
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder)
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false))
DRM_ERROR("PCH transcoder %c FIFO underrun\n",
transcoder_name(pch_transcoder));
}
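/*
 * Sketch (not driver code) of how a platform interrupt handler is expected
 * to feed the two handlers above; the real callers live in i915_irq.c. The
 * EXAMPLE_* status bit is hypothetical. The handler itself decides whether
 * to log and then disarms reporting, which is what prevents an irq storm.
 */
static void example_pipe_irq(struct drm_i915_private *dev_priv,
			     enum pipe pipe, u32 iir)
{
	if (iir & EXAMPLE_PIPE_FIFO_UNDERRUN_BIT)
		intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
}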
/drivers/video/drm/i915/intel_frontbuffer.c
0,0 → 1,279
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
 
/**
* DOC: frontbuffer tracking
*
* Many features require us to track changes to the currently active
* frontbuffer, especially rendering targeted at the frontbuffer.
*
* To be able to do so GEM tracks frontbuffers using a bitmask for all possible
* frontbuffer slots through i915_gem_track_fb(). The functions in this file are
* then called when the contents of the frontbuffer are invalidated, when
* frontbuffer rendering has stopped again to flush out all the changes and when
* the frontbuffer is exchanged with a flip. Subsystems interested in
* frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
* into the relevant places and filter for the frontbuffer slots that they are
* interested in.
*
* On a high level there are two types of powersaving features. The first type
* works like a special cache (FBC and PSR) and is interested in when it should
* stop caching and when to restart caching. This is done by placing callbacks
* into the invalidate and the flush functions: at invalidate time the caching must
* be stopped and at flush time it can be restarted. Such a feature may also need to
* know when the frontbuffer changes (e.g. when the hw doesn't initiate an
* invalidate and flush on its own), which can be achieved by placing callbacks
* into the flip functions.
*
* The other type of display power saving feature only cares about busyness
* (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
* busyness. There is no direct way to detect idleness. Instead an idle timer
* (delayed work) should be started from the flush and flip functions and
* cancelled as soon as busyness is detected.
*
* Note that there's also an older frontbuffer activity tracking scheme which
* just tracks general activity. This is done by the various mark_busy and
* mark_idle functions. For display power management features using these
* functions is deprecated and should be avoided.
*/
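/*
 * Rough sketch of the "special cache" consumer described above: a feature
 * stops caching on invalidate and restarts it on flush. All example_* names
 * and EXAMPLE_TRACKED_BITS are placeholders, not driver API.
 */
static void example_cache_invalidate(struct drm_i915_private *dev_priv,
				     unsigned frontbuffer_bits)
{
	if (frontbuffer_bits & EXAMPLE_TRACKED_BITS)
		example_cache_stop(dev_priv);	/* stop caching immediately */
}

static void example_cache_flush(struct drm_i915_private *dev_priv,
				unsigned frontbuffer_bits)
{
	if (frontbuffer_bits & EXAMPLE_TRACKED_BITS)
		example_cache_start(dev_priv);	/* safe to cache again */
}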
 
#include <drm/drmP.h>
 
#include "intel_drv.h"
#include "i915_drv.h"
 
static void intel_increase_pllclock(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_reg = DPLL(pipe);
int dpll;
 
if (!HAS_GMCH_DISPLAY(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
return;
 
dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
assert_panel_unlocked(dev_priv, pipe);
 
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
 
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
}
}
 
/**
* intel_mark_fb_busy - mark given planes as busy
* @dev: DRM device
* @frontbuffer_bits: bits for the affected planes
* @ring: optional ring for asynchronous commands
*
* This function gets called every time the screen contents change. It can be
* used to keep e.g. the update rate at the nominal refresh rate with DRRS.
*/
static void intel_mark_fb_busy(struct drm_device *dev,
unsigned frontbuffer_bits,
struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
 
if (!i915.powersave)
return;
 
for_each_pipe(dev_priv, pipe) {
if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
 
intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
 
/**
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
* be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
* until the rendering completes or a flip on this frontbuffer plane is
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (!obj->frontbuffer_bits)
return;
 
if (ring) {
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.busy_bits
|= obj->frontbuffer_bits;
dev_priv->fb_tracking.flip_bits
&= ~obj->frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
intel_psr_invalidate(dev, obj->frontbuffer_bits);
}
 
/**
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
* delayed if they're blocked by some outstanding asynchronous rendering.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Delay flushing when rings are still busy.*/
mutex_lock(&dev_priv->fb_tracking.lock);
frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
intel_psr_flush(dev, frontbuffer_bits);
 
/*
* FIXME: Unconditional fbc flushing here is a rather gross hack and
* needs to be reworked into a proper frontbuffer tracking scheme like
* psr employs.
*/
if (dev_priv->fbc.need_sw_cache_clean) {
dev_priv->fbc.need_sw_cache_clean = false;
bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
}
}
 
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
* then any delayed flushes will be unblocked.
*/
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
bool retire)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned frontbuffer_bits;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
if (!obj->frontbuffer_bits)
return;
 
frontbuffer_bits = obj->frontbuffer_bits;
 
if (retire) {
mutex_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
 
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
 
/**
* intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after scheduling a flip on the given planes. The actual
* frontbuffer flushing will be delayed until completion is signalled with
* intel_frontbuffer_flip_complete. If an invalidate happens in between this
* flush will be cancelled.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev_priv->fb_tracking.lock);
dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
}
 
/**
* intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* This function gets called after the flip has been latched and will complete
* on the next vblank. It will execute the flush if it hasn't been cancelled yet.
*
* Can be called without any locks held.
*/
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev_priv->fb_tracking.lock);
/* Mask any cancelled flips. */
frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
 
intel_frontbuffer_flush(dev, frontbuffer_bits);
}
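/*
 * Condensed sketch (not the actual page flip code) of the call order the
 * two flip helpers above expect; example_queue_flip_to_hw() is a placeholder
 * and the completion normally runs from the flip-done interrupt rather than
 * inline.
 */
static void example_async_flip(struct drm_device *dev,
			       unsigned frontbuffer_bits)
{
	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

	example_queue_flip_to_hw(dev);	/* hypothetical: arm the flip */

	/* ...later, once the flip has been latched by the hardware: */
	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
}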
/drivers/video/drm/i915/intel_hdmi.c
166,6 → 166,19
POSTING_READ(VIDEO_DIP_CTL);
}
 
static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
u32 val = I915_READ(VIDEO_DIP_CTL);
 
if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
return val & VIDEO_DIP_ENABLE;
 
return false;
}
 
static void ibx_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
204,6 → 217,17
POSTING_READ(reg);
}
 
static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
}
 
static void cpt_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
245,6 → 269,17
POSTING_READ(reg);
}
 
static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
}
 
static void vlv_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
283,6 → 318,17
POSTING_READ(reg);
}
 
static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
return val & VIDEO_DIP_ENABLE;
}
 
static void hsw_write_infoframe(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
320,6 → 366,18
POSTING_READ(ctl_reg);
}
 
static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
u32 val = I915_READ(ctl_reg);
 
return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
VIDEO_DIP_ENABLE_VS_HSW);
}
 
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
661,14 → 719,6
if (crtc->config.has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
 
if (crtc->config.has_audio) {
WARN_ON(!crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
intel_write_eld(&encoder->base, adjusted_mode);
}
 
if (HAS_PCH_CPT(dev))
hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else if (IS_CHERRYVIEW(dev))
690,7 → 740,7
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(intel_hdmi->hdmi_reg);
732,7 → 782,10
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
 
if (tmp & HDMI_MODE_SELECT_HDMI)
if (intel_hdmi->infoframe_enabled(&encoder->base))
pipe_config->has_infoframe = true;
 
if (tmp & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
 
if (!HAS_PCH_SPLIT(dev) &&
791,7 → 844,14
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
 
if (intel_crtc->config.has_audio) {
WARN_ON(!intel_crtc->config.has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(intel_crtc->pipe));
intel_audio_codec_enable(encoder);
}
}
 
static void vlv_enable_hdmi(struct intel_encoder *encoder)
{
802,9 → 862,13
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
 
if (crtc->config.has_audio)
intel_audio_codec_disable(encoder);
 
temp = I915_READ(intel_hdmi->hdmi_reg);
 
/* HW workaround for IBX, we need to move the port to transcoder A
869,10 → 933,15
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
int clock = mode->clock;
 
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
 
if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
if (clock < 20000)
return MODE_CLOCK_LOW;
 
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
890,7 → 959,7
if (HAS_GMCH_DISPLAY(dev))
return false;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
 
917,6 → 986,9
 
pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
 
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
 
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (pipe_config->has_hdmi_sink &&
926,6 → 998,10
intel_hdmi->color_range = 0;
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
pipe_config->pixel_multiplier = 2;
}
 
if (intel_hdmi->color_range)
pipe_config->limited_color_range = true;
 
967,105 → 1043,118
return true;
}
 
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
 
kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
 
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder =
&hdmi_to_dig_port(intel_hdmi)->base;
enum intel_display_power_domain power_domain;
struct edid *edid;
enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
bool connected = false;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
 
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_display_power_put(dev_priv, power_domain);
 
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
 
if (status == connector_status_connected) {
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_hdmi->force_audio == HDMI_AUDIO_ON;
 
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
 
connected = true;
}
 
intel_display_power_put(dev_priv, power_domain);
return connected;
}
 
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
 
intel_hdmi_unset_edid(connector);
 
if (intel_hdmi_set_edid(connector)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
status = connector_status_connected;
} else
status = connector_status_disconnected;
 
return status;
}
 
static int intel_hdmi_get_modes(struct drm_connector *connector)
static void
intel_hdmi_force(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
 
/* We should parse the EDID data and find out if it's an HDMI sink so
* we can send audio to it.
*/
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
intel_hdmi_unset_edid(connector);
 
ret = intel_ddc_get_modes(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (connector->status != connector_status_connected)
return;
 
intel_display_power_put(dev_priv, power_domain);
intel_hdmi_set_edid(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
 
return ret;
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct edid *edid;
 
edid = to_intel_connector(connector)->detect_edid;
if (edid == NULL)
return 0;
 
return intel_connector_update_modes(connector, edid);
}
 
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
bool has_audio = false;
struct edid *edid;
bool has_audio = false;
 
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
 
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
edid = to_intel_connector(connector)->detect_edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
 
intel_display_power_put(dev_priv, power_domain);
 
return has_audio;
}
 
1265,6 → 1354,8
enum pipe pipe = intel_crtc->pipe;
u32 val;
 
intel_hdmi_prepare(encoder);
 
mutex_lock(&dev_priv->dpio_lock);
 
/* program left/right clock distribution */
1370,10 → 1461,13
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
int data, i;
1381,6 → 1475,15
 
mutex_lock(&dev_priv->dpio_lock);
 
/* allow hardware to manage TX FIFO reset source */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
 
/* Deassert soft data lane reset*/
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
1417,12 → 1520,26
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
 
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
 
/* FIXME: Program the support xxx V-dB */
/* Use 800mV-0dB */
for (i = 0; i < 4; i++) {
1434,8 → 1551,8
 
for (i = 0; i < 4; i++) {
val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN_MASK;
val |= 102 << DPIO_SWING_MARGIN_SHIFT;
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
}
 
1475,6 → 1592,10
 
mutex_unlock(&dev_priv->dpio_lock);
 
intel_hdmi->set_infoframes(&encoder->base,
intel_crtc->config.has_hdmi_sink,
adjusted_mode);
 
intel_enable_hdmi(encoder);
 
vlv_wait_port_ready(dev_priv, dport);
1482,6 → 1603,7
 
static void intel_hdmi_destroy(struct drm_connector *connector)
{
kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
}
1489,6 → 1611,7
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy,
1567,18 → 1690,23
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
 
if (HAS_DDI(dev))
/drivers/video/drm/i915/intel_lrc.c
0,0 → 1,1938
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Ben Widawsky <ben@bwidawsk.net>
* Michel Thierry <michel.thierry@intel.com>
* Thomas Daniel <thomas.daniel@intel.com>
* Oscar Mateo <oscar.mateo@intel.com>
*
*/
 
/**
* DOC: Logical Rings, Logical Ring Contexts and Execlists
*
* Motivation:
* GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
* These expanded contexts enable a number of new abilities, especially
* "Execlists" (also implemented in this file).
*
* One of the main differences with the legacy HW contexts is that logical
* ring contexts incorporate many more things to the context's state, like
* PDPs or ringbuffer control registers:
*
* The reason why PDPs are included in the context is straightforward: as
* PPGTTs (per-process GTTs) are actually per-context, having the PDPs
* contained there means you don't need to do a ppgtt->switch_mm yourself,
* instead, the GPU will do it for you on the context switch.
*
* But what about the ringbuffer control registers (head, tail, etc..)?
* Shouldn't we just need a set of those per engine command streamer? This is
* where the name "Logical Rings" starts to make sense: by virtualizing the
* rings, the engine cs shifts to a new "ring buffer" with every context
* switch. When you want to submit a workload to the GPU you: A) choose your
* context, B) find its appropriate virtualized ring, C) write commands to it
* and then, finally, D) tell the GPU to switch to that context.
*
* Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
* to a context is via a context execution list, ergo "Execlists".
*
* LRC implementation:
* Regarding the creation of contexts, we have:
*
* - One global default context.
* - One local default context for each opened fd.
* - One local extra context for each context create ioctl call.
*
* Now that ringbuffers belong per-context (and not per-engine, like before)
* and that contexts are uniquely tied to a given engine (and not reusable,
* like before) we need:
*
* - One ringbuffer per-engine inside each context.
* - One backing object per-engine inside each context.
*
* The global default context starts its life with these new objects fully
* allocated and populated. The local default context for each opened fd is
* more complex, because we don't know at creation time which engine is going
* to use them. To handle this, we have implemented a deferred creation of LR
* contexts:
*
* The local context starts its life as a hollow or blank holder, that only
* gets populated for a given engine once we receive an execbuffer. If later
* on we receive another execbuffer ioctl for the same context but a different
* engine, we allocate/populate a new ringbuffer and context backing object and
* so on.
*
* Finally, regarding local contexts created using the ioctl call: as they are
* only allowed with the render ring, we can allocate & populate them right
* away (no need to defer anything, at least for now).
*
* Execlists implementation:
* Execlists are the new method by which, on gen8+ hardware, workloads are
* submitted for execution (as opposed to the legacy, ringbuffer-based, method).
* This method works as follows:
*
* When a request is committed, its commands (the BB start and any leading or
* trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
* for the appropriate context. The tail pointer in the hardware context is not
* updated at this time, but instead, kept by the driver in the ringbuffer
* structure. A structure representing this request is added to a request queue
* for the appropriate engine: this structure contains a copy of the context's
* tail after the request was written to the ring buffer and a pointer to the
* context itself.
*
* If the engine's request queue was empty before the request was added, the
* queue is processed immediately. Otherwise the queue will be processed during
* a context switch interrupt. In any case, elements on the queue will get sent
* (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
* globally unique 20-bits submission ID.
*
* When execution of a request completes, the GPU updates the context status
* buffer with a context complete event and generates a context switch interrupt.
* During the interrupt handling, the driver examines the events in the buffer:
* for each context complete event, if the announced ID matches that on the head
* of the request queue, then that request is retired and removed from the queue.
*
* After processing, if any requests were retired and the queue is not empty
* then a new execution list can be submitted. The two requests at the front of
* the queue are next to be submitted but since a context may not occur twice in
* an execution list, if subsequent requests have the same ID as the first then
* the two requests must be combined. This is done simply by discarding requests
* at the head of the queue until either only one request is left (in which case
* we use a NULL second context) or the first two requests have unique IDs.
*
* By always executing the first two requests in the queue the driver ensures
* that the GPU is kept as busy as possible. In the case where a single context
* completes but a second context is still executing, the request for this second
* context will be at the head of the queue when we remove the first one. This
* request will then be resubmitted along with a new request for a different context,
* which will cause the hardware to continue executing the second request and queue
* the new request (the GPU detects the condition of a context getting preempted
* with the same context and optimizes the context switch flow by not doing
* preemption, but just sampling the new tail pointer).
*
*/
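/*
 * Minimal illustration of the A-D steps above, using the helpers defined
 * later in this file. This is a sketch only (error handling and locking
 * omitted), not a real submission path.
 */
static void example_submit_noops(struct intel_engine_cs *ring,
				 struct intel_context *ctx)
{
	/* A + B: each context owns one virtualized ringbuffer per engine. */
	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;

	/* C: write commands into the per-context ring. */
	if (intel_logical_ring_begin(ringbuf, 2))
		return;
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);

	/* D: update the tail and queue the context to the ELSP. */
	intel_logical_ring_advance_and_submit(ringbuf);
}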
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_drv.h"
 
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
 
#define RING_EXECLIST_QFULL (1 << 0x2)
#define RING_EXECLIST1_VALID (1 << 0x3)
#define RING_EXECLIST0_VALID (1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
#define RING_EXECLIST1_ACTIVE (1 << 0x11)
#define RING_EXECLIST0_ACTIVE (1 << 0x12)
 
#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
 
#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
#define CTX_RING_HEAD 0x04
#define CTX_RING_TAIL 0x06
#define CTX_RING_BUFFER_START 0x08
#define CTX_RING_BUFFER_CONTROL 0x0a
#define CTX_BB_HEAD_U 0x0c
#define CTX_BB_HEAD_L 0x0e
#define CTX_BB_STATE 0x10
#define CTX_SECOND_BB_HEAD_U 0x12
#define CTX_SECOND_BB_HEAD_L 0x14
#define CTX_SECOND_BB_STATE 0x16
#define CTX_BB_PER_CTX_PTR 0x18
#define CTX_RCS_INDIRECT_CTX 0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c
#define CTX_LRI_HEADER_1 0x21
#define CTX_CTX_TIMESTAMP 0x22
#define CTX_PDP3_UDW 0x24
#define CTX_PDP3_LDW 0x26
#define CTX_PDP2_UDW 0x28
#define CTX_PDP2_LDW 0x2a
#define CTX_PDP1_UDW 0x2c
#define CTX_PDP1_LDW 0x2e
#define CTX_PDP0_UDW 0x30
#define CTX_PDP0_LDW 0x32
#define CTX_LRI_HEADER_2 0x41
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
 
#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)
enum {
ADVANCED_CONTEXT = 0,
LEGACY_CONTEXT,
ADVANCED_AD_CONTEXT,
LEGACY_64B_CONTEXT
};
#define GEN8_CTX_MODE_SHIFT 3
enum {
FAULT_AND_HANG = 0,
FAULT_AND_HALT, /* Debug only */
FAULT_AND_STREAM,
FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
 
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx);
 
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
* @dev: DRM device.
* @enable_execlists: value of i915.enable_execlists module parameter.
*
* Only certain platforms support Execlists (the prerequisites being
* support for Logical Ring Contexts and Aliasing PPGTT or better),
* and only when enabled via module parameter.
*
* Return: 1 if Execlists is supported and has to be enabled.
*/
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists)
{
WARN_ON(i915.enable_ppgtt == -1);
 
if (INTEL_INFO(dev)->gen >= 9)
return 1;
 
if (enable_execlists == 0)
return 0;
 
if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) &&
i915.use_mmio_flip >= 0)
return 1;
 
return 0;
}
 
/**
* intel_execlists_ctx_id() - get the Execlists Context ID
* @ctx_obj: Logical Ring Context backing object.
*
* Do not confuse with ctx->id! Unfortunately we have a name overload
* here: the old context ID we pass to userspace as a handle so that
* they can refer to a context, and the new context ID we pass to the
* ELSP so that the GPU can inform us of the context status via
* interrupts.
*
* Return: 20-bits globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
{
u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
/* LRCA is required to be 4K aligned so the more significant 20 bits
* are globally unique */
return lrca >> 12;
}
 
static uint64_t execlists_ctx_descriptor(struct drm_i915_gem_object *ctx_obj)
{
uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj);
 
WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
desc = GEN8_CTX_VALID;
desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
desc |= GEN8_CTX_L3LLC_COHERENT;
desc |= GEN8_CTX_PRIVILEGE;
desc |= lrca;
desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
 
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
/* desc |= GEN8_CTX_FORCE_RESTORE; */
 
return desc;
}
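/*
 * Worked example with a made-up address: for a context object at GGTT
 * offset 0x0012f000 the LRCA is 4K aligned, so
 *
 *   ctx_id = 0x0012f000 >> 12 = 0x0012f          (20-bit context ID)
 *   desc   = GEN8_CTX_VALID | (LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT) |
 *            GEN8_CTX_L3LLC_COHERENT | GEN8_CTX_PRIVILEGE |
 *            0x0012f000 | ((u64)0x0012f << GEN8_CTX_ID_SHIFT)
 *          = 0x0000012f0012f129
 *
 * i.e. the low dword carries the flags plus the LRCA and the high dword
 * carries the context ID that later comes back via the context status
 * buffer.
 */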
 
static void execlists_elsp_write(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj0,
struct drm_i915_gem_object *ctx_obj1)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t temp = 0;
uint32_t desc[4];
unsigned long flags;
 
/* XXX: You must always write both descriptors in the order below. */
if (ctx_obj1)
temp = execlists_ctx_descriptor(ctx_obj1);
else
temp = 0;
desc[1] = (u32)(temp >> 32);
desc[0] = (u32)temp;
 
temp = execlists_ctx_descriptor(ctx_obj0);
desc[3] = (u32)(temp >> 32);
desc[2] = (u32)temp;
 
/* Set Force Wakeup bit to prevent GT from entering C6 while ELSP writes
* are in progress.
*
* The other problem is that we can't just call gen6_gt_force_wake_get()
* because that function calls intel_runtime_pm_get(), which might sleep.
* Instead, we do the runtime_pm_get/put when creating/destroying requests.
*/
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (dev_priv->uncore.fw_blittercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_BLITTER);
}
} else {
if (dev_priv->uncore.forcewake_count++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_ALL);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
 
I915_WRITE(RING_ELSP(ring), desc[1]);
I915_WRITE(RING_ELSP(ring), desc[0]);
I915_WRITE(RING_ELSP(ring), desc[3]);
/* The context is automatically loaded after the following */
I915_WRITE(RING_ELSP(ring), desc[2]);
 
/* ELSP is a wo register, so use another nearby reg for posting instead */
POSTING_READ(RING_EXECLIST_STATUS(ring));
 
/* Release Force Wakeup (see the big comment above). */
spin_lock_irqsave(&dev_priv->uncore.lock, flags);
if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
if (INTEL_INFO(dev)->gen >= 9) {
if (--dev_priv->uncore.fw_blittercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_BLITTER);
}
} else {
if (--dev_priv->uncore.forcewake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_ALL);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
}
 
static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
struct drm_i915_gem_object *ring_obj,
u32 tail)
{
struct page *page;
uint32_t *reg_state;
 
page = i915_gem_object_get_page(ctx_obj, 1);
reg_state = kmap_atomic(page);
 
reg_state[CTX_RING_TAIL+1] = tail;
reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
 
kunmap_atomic(reg_state);
 
return 0;
}
 
static void execlists_submit_contexts(struct intel_engine_cs *ring,
struct intel_context *to0, u32 tail0,
struct intel_context *to1, u32 tail1)
{
struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
struct drm_i915_gem_object *ctx_obj1 = NULL;
struct intel_ringbuffer *ringbuf1 = NULL;
 
BUG_ON(!ctx_obj0);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
 
if (to1) {
ringbuf1 = to1->engine[ring->id].ringbuf;
ctx_obj1 = to1->engine[ring->id].state;
BUG_ON(!ctx_obj1);
WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
 
execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
}
 
execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
}
 
static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
 
assert_spin_locked(&ring->execlist_lock);
 
if (list_empty(&ring->execlist_queue))
return;
 
/* Try to read in pairs */
list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
execlist_link) {
if (!req0) {
req0 = cursor;
} else if (req0->ctx == cursor->ctx) {
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
list_add_tail(&req0->execlist_link,
&ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
break;
}
}
 
WARN_ON(req1 && req1->elsp_submitted);
 
execlists_submit_contexts(ring, req0->ctx, req0->tail,
req1 ? req1->ctx : NULL,
req1 ? req1->tail : 0);
 
req0->elsp_submitted++;
if (req1)
req1->elsp_submitted++;
}
 
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
struct intel_ctx_submit_request *head_req;
 
assert_spin_locked(&ring->execlist_lock);
 
head_req = list_first_entry_or_null(&ring->execlist_queue,
struct intel_ctx_submit_request,
execlist_link);
 
if (head_req != NULL) {
struct drm_i915_gem_object *ctx_obj =
head_req->ctx->engine[ring->id].state;
if (intel_execlists_ctx_id(ctx_obj) == request_id) {
WARN(head_req->elsp_submitted == 0,
"Never submitted head request\n");
 
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
list_add_tail(&head_req->execlist_link,
&ring->execlist_retired_req_list);
return true;
}
}
}
 
return false;
}
 
/**
* intel_execlists_handle_ctx_events() - handle Context Switch interrupts
* @ring: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 status_id;
u32 submit_contexts = 0;
 
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
read_pointer = ring->next_context_status_buffer;
write_pointer = status_pointer & 0x07;
if (read_pointer > write_pointer)
write_pointer += 6;
 
spin_lock(&ring->execlist_lock);
 
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
 
if (status & GEN8_CTX_STATUS_PREEMPTED) {
if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
if (execlists_check_remove_request(ring, status_id))
WARN(1, "Lite Restored request removed from queue\n");
} else
WARN(1, "Preemption without Lite Restore\n");
}
 
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
if (execlists_check_remove_request(ring, status_id))
submit_contexts++;
}
}
 
if (submit_contexts != 0)
execlists_context_unqueue(ring);
 
spin_unlock(&ring->execlist_lock);
 
WARN(submit_contexts > 2, "More than two context complete events?\n");
ring->next_context_status_buffer = write_pointer % 6;
 
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
((u32)ring->next_context_status_buffer & 0x07) << 8);
}
 
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
{
struct intel_ctx_submit_request *req = NULL, *cursor;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
int num_elements = 0;
 
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
req->ctx = to;
i915_gem_context_reference(req->ctx);
 
if (to != ring->default_context)
intel_lr_context_pin(ring, to);
 
req->ring = ring;
req->tail = tail;
 
intel_runtime_pm_get(dev_priv);
 
spin_lock_irqsave(&ring->execlist_lock, flags);
 
list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
 
if (num_elements > 2) {
struct intel_ctx_submit_request *tail_req;
 
tail_req = list_last_entry(&ring->execlist_queue,
struct intel_ctx_submit_request,
execlist_link);
 
if (to == tail_req->ctx) {
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
list_add_tail(&tail_req->execlist_link,
&ring->execlist_retired_req_list);
}
}
 
list_add_tail(&req->execlist_link, &ring->execlist_queue);
if (num_elements == 0)
execlists_context_unqueue(ring);
 
spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
return 0;
}
 
static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
{
struct intel_engine_cs *ring = ringbuf->ring;
uint32_t flush_domains;
int ret;
 
flush_domains = 0;
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
 
ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
 
ring->gpu_caches_dirty = false;
return 0;
}
 
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct list_head *vmas)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
int ret;
 
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
 
ret = i915_gem_object_sync(obj, ring);
if (ret)
return ret;
 
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
flush_chipset |= i915_gem_clflush_object(obj, false);
 
flush_domains |= obj->base.write_domain;
}
 
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
 
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
return logical_ring_invalidate_all_caches(ringbuf);
}
 
/**
* execlists_submission() - submit a batchbuffer for execution, Execlists style
* @dev: DRM device.
* @file: DRM file.
* @ring: Engine Command Streamer to submit to.
* @ctx: Context to employ for this submission.
* @args: execbuffer call arguments.
* @vmas: list of vmas.
* @batch_obj: the batchbuffer to submit.
* @exec_start: batchbuffer start virtual address pointer.
* @flags: translated execbuffer call flags.
*
* This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
* away the submission details of the execbuffer ioctl call.
*
* Return: non-zero if the submission fails.
*/
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
int instp_mode;
u32 instp_mask;
int ret;
 
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
switch (instp_mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
 
if (instp_mode != dev_priv->relative_constants_mode) {
if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
return -EINVAL;
}
 
/* The HW changed the meaning on this bit on gen6 */
instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
return -EINVAL;
}
 
if (args->num_cliprects != 0) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
return -EINVAL;
} else {
if (args->DR4 == 0xffffffff) {
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
args->DR4 = 0;
}
 
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
return -EINVAL;
}
}
 
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
DRM_DEBUG("sol reset is gen7 only\n");
return -EINVAL;
}
 
ret = execlists_move_to_gpu(ringbuf, vmas);
if (ret)
return ret;
 
if (ring == &dev_priv->ring[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_logical_ring_begin(ringbuf, 4);
if (ret)
return ret;
 
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
intel_logical_ring_emit(ringbuf, INSTPM);
intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
intel_logical_ring_advance(ringbuf);
 
dev_priv->relative_constants_mode = instp_mode;
}
 
ret = ring->emit_bb_start(ringbuf, exec_start, flags);
if (ret)
return ret;
 
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
return 0;
}
 
void intel_execlists_retire_requests(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req, *tmp;
struct drm_i915_private *dev_priv = ring->dev->dev_private;
unsigned long flags;
struct list_head retired_list;
 
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (list_empty(&ring->execlist_retired_req_list))
return;
 
INIT_LIST_HEAD(&retired_list);
spin_lock_irqsave(&ring->execlist_lock, flags);
list_replace_init(&ring->execlist_retired_req_list, &retired_list);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
struct drm_i915_gem_object *ctx_obj =
ctx->engine[ring->id].state;
 
if (ctx_obj && (ctx != ring->default_context))
intel_lr_context_unpin(ring, ctx);
intel_runtime_pm_put(dev_priv);
i915_gem_context_unreference(req->ctx);
list_del(&req->execlist_link);
kfree(req);
}
}
 
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
int ret;
 
if (!intel_ring_initialized(ring))
return;
 
ret = intel_ring_idle(ring);
if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
 
/* TODO: Is this correct with Execlists enabled? */
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
return;
}
I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
}
 
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
{
struct intel_engine_cs *ring = ringbuf->ring;
int ret;
 
if (!ring->gpu_caches_dirty)
return 0;
 
ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
ring->gpu_caches_dirty = false;
return 0;
}
 
/**
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @ringbuf: Logical Ringbuffer to advance.
*
* The tail is updated in our logical ringbuffer struct, not in the actual context. What
* really happens during submission is that the context and current tail will be placed
* on a queue waiting for the ELSP to be ready to accept a new context submission. At that
* point, the tail *inside* the context is updated and the ELSP written to.
*/
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
intel_logical_ring_advance(ringbuf);
 
if (intel_ring_stopped(ring))
return;
 
execlists_context_queue(ring, ctx, ringbuf->tail);
}
 
static int intel_lr_context_pin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
int ret = 0;
 
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (ctx->engine[ring->id].unpin_count++ == 0) {
ret = i915_gem_obj_ggtt_pin(ctx_obj,
GEN8_LR_CONTEXT_ALIGN, 0);
if (ret)
goto reset_unpin_count;
 
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
}
 
return ret;
 
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
reset_unpin_count:
ctx->engine[ring->id].unpin_count = 0;
 
return ret;
}
 
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
 
if (ctx_obj) {
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (--ctx->engine[ring->id].unpin_count == 0) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
}
}
 
static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret;
 
if (ring->outstanding_lazy_seqno)
return 0;
 
if (ring->preallocated_lazy_request == NULL) {
struct drm_i915_gem_request *request;
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
 
if (ctx != ring->default_context) {
ret = intel_lr_context_pin(ring, ctx);
if (ret) {
kfree(request);
return ret;
}
}
 
/* Hold a reference to the context this request belongs to
* (we will need it when the time comes to emit/retire the
* request).
*/
request->ctx = ctx;
i915_gem_context_reference(request->ctx);
 
ring->preallocated_lazy_request = request;
}
 
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
 
static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
 
if (ringbuf->last_retired_head != -1) {
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes)
return 0;
}
 
list_for_each_entry(request, &ring->request_list, list) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= bytes) {
seqno = request->seqno;
break;
}
}
 
if (seqno == 0)
return -ENOSPC;
 
ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
 
i915_gem_retire_requests_ring(ring);
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
 
static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
int bytes)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
int ret;
 
ret = logical_ring_wait_request(ringbuf, bytes);
if (ret != -ENOSPC)
return ret;
 
/* Force the context submission in case we have been skipping it */
intel_logical_ring_advance_and_submit(ringbuf);
 
/* With GEM the hangcheck timer should kick us out of the loop,
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
end = jiffies + 60 * HZ;
 
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= bytes) {
ret = 0;
break;
}
 
msleep(1);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
break;
 
if (time_after(jiffies, end)) {
ret = -EBUSY;
break;
}
} while (1);
 
return ret;
}
 
static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
{
uint32_t __iomem *virt;
int rem = ringbuf->size - ringbuf->tail;
 
if (ringbuf->space < rem) {
int ret = logical_ring_wait_for_space(ringbuf, rem);
 
if (ret)
return ret;
}
 
virt = ringbuf->virtual_start + ringbuf->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ringbuf->tail = 0;
ringbuf->space = intel_ring_space(ringbuf);
 
return 0;
}
 
static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
{
int ret;
 
if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
ret = logical_ring_wrap_buffer(ringbuf);
if (unlikely(ret))
return ret;
}
 
if (unlikely(ringbuf->space < bytes)) {
ret = logical_ring_wait_for_space(ringbuf, bytes);
if (unlikely(ret))
return ret;
}
 
return 0;
}
 
/**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
*
* @ringbuf: Logical ringbuffer.
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
*
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to
* be wrapped, or wait a bit for the tail to be updated). This function takes care of that
* and also preallocates a request (every workload submission is still mediated through
* requests, same as it did with legacy ringbuffer submission).
*
* Return: non-zero if the ringbuffer is not ready to be written to.
*/
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
 
ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
 
/* Preallocate the olr before touching the ring */
ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx);
if (ret)
return ret;
 
ringbuf->space -= num_dwords * sizeof(uint32_t);
return 0;
}
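 
/*
 * Illustrative sketch, not part of the driver: the canonical shape of a
 * command emission through the API documented above. The helper name and the
 * NOOP payload are made up for the example; real callers such as
 * intel_logical_ring_workarounds_emit() below follow the same
 * begin/emit/advance pattern.
 */
static inline int logical_ring_emit_noops_example(struct intel_ringbuffer *ringbuf,
int num_dwords)
{
int ret, i;
 
/* Reserve ring space and preallocate the lazy request */
ret = intel_logical_ring_begin(ringbuf, num_dwords);
if (ret)
return ret;
 
/* Write the DWORDs into the ringbuffer... */
for (i = 0; i < num_dwords; i++)
intel_logical_ring_emit(ringbuf, MI_NOOP);
 
/* ...and advance the logical tail (submission is a separate step) */
intel_logical_ring_advance(ringbuf);
 
return 0;
}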
 
static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
 
if (WARN_ON(w->count == 0))
return 0;
 
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
 
ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
if (ret)
return ret;
 
intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_logical_ring_emit(ringbuf, w->reg[i].addr);
intel_logical_ring_emit(ringbuf, w->reg[i].value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
 
intel_logical_ring_advance(ringbuf);
 
ring->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(ringbuf);
if (ret)
return ret;
 
return 0;
}
 
static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
return 0;
}
 
static int gen8_init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = gen8_init_common_ring(ring);
if (ret)
return ret;
 
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
 
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
return init_workarounds_ring(ring);
}
 
static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags)
{
bool ppgtt = !(flags & I915_DISPATCH_SECURE);
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 4);
if (ret)
return ret;
 
/* FIXME(BDW): Address space and security selectors. */
intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8));
intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
 
static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
POSTING_READ(RING_IMR(ring->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
POSTING_READ(RING_IMR(ring->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
u32 invalidate_domains,
u32 unused)
{
struct intel_engine_cs *ring = ringbuf->ring;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 4);
if (ret)
return ret;
 
cmd = MI_FLUSH_DW + 1;
 
if (ring == &dev_priv->ring[VCS]) {
if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
} else {
if (invalidate_domains & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
}
 
intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
I915_GEM_HWS_SCRATCH_ADDR |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0); /* upper addr */
intel_logical_ring_emit(ringbuf, 0); /* value */
intel_logical_ring_advance(ringbuf);
 
return 0;
}
 
static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
u32 invalidate_domains,
u32 flush_domains)
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
u32 flags = 0;
int ret;
 
flags |= PIPE_CONTROL_CS_STALL;
 
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
}
 
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
}
 
ret = intel_logical_ring_begin(ringbuf, 6);
if (ret)
return ret;
 
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_advance(ringbuf);
 
return 0;
}
 
static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
 
static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
{
struct intel_engine_cs *ring = ringbuf->ring;
u32 cmd;
int ret;
 
ret = intel_logical_ring_begin(ringbuf, 6);
if (ret)
return ret;
 
cmd = MI_STORE_DWORD_IMM_GEN8;
cmd |= MI_GLOBAL_GTT;
 
intel_logical_ring_emit(ringbuf, cmd);
intel_logical_ring_emit(ringbuf,
(ring->status_page.gfx_addr +
(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf);
 
return 0;
}
 
/**
* intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
*
* @ring: Engine Command Streamer.
*
*/
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv;
 
if (!intel_ring_initialized(ring))
return;
 
dev_priv = ring->dev->dev_private;
 
intel_logical_ring_stop(ring);
WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
 
if (ring->cleanup)
ring->cleanup(ring);
 
i915_cmd_parser_fini_ring(ring);
 
if (ring->status_page.obj) {
kunmap(sg_page(ring->status_page.obj->pages->sgl));
ring->status_page.obj = NULL;
}
}
 
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
int ret;
 
/* Intentionally left blank. */
ring->buffer = NULL;
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
init_waitqueue_head(&ring->irq_queue);
 
INIT_LIST_HEAD(&ring->execlist_queue);
INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;
 
ret = i915_cmd_parser_init_ring(ring);
if (ret)
return ret;
 
if (ring->init) {
ret = ring->init(ring);
if (ret)
return ret;
}
 
ret = intel_lr_context_deferred_create(ring->default_context, ring);
 
return ret;
}
 
static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
 
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
if (HAS_L3_DPF(dev))
ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
ring->init = gen8_init_render_ring;
ring->init_context = intel_logical_ring_workarounds_emit;
ring->cleanup = intel_fini_pipe_control;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush_render;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
}
 
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 
ring->name = "bsd ring";
ring->id = VCS;
ring->mmio_base = GEN6_BSD_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
}
 
static int logical_bsd2_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
ring->name = "bds2 ring";
ring->id = VCS2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
}
 
static int logical_blt_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[BCS];
 
ring->name = "blitter ring";
ring->id = BCS;
ring->mmio_base = BLT_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
}
 
static int logical_vebox_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[VECS];
 
ring->name = "video enhancement ring";
ring->id = VECS;
ring->mmio_base = VEBOX_RING_BASE;
ring->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
ring->irq_keep_mask =
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
ring->init = gen8_init_common_ring;
ring->get_seqno = gen8_get_seqno;
ring->set_seqno = gen8_set_seqno;
ring->emit_request = gen8_emit_request;
ring->emit_flush = gen8_emit_flush;
ring->irq_get = gen8_logical_ring_get_irq;
ring->irq_put = gen8_logical_ring_put_irq;
ring->emit_bb_start = gen8_emit_bb_start;
 
return logical_ring_init(dev, ring);
}
 
/**
* intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
* @dev: DRM device.
*
* This function inits the engines for an Execlists submission style (the equivalent in the
* legacy ringbuffer submission world would be i915_gem_init_rings). It does so only for
* those engines that are present in the hardware.
*
* Return: non-zero if the initialization failed.
*/
int intel_logical_rings_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = logical_render_ring_init(dev);
if (ret)
return ret;
 
if (HAS_BSD(dev)) {
ret = logical_bsd_ring_init(dev);
if (ret)
goto cleanup_render_ring;
}
 
if (HAS_BLT(dev)) {
ret = logical_blt_ring_init(dev);
if (ret)
goto cleanup_bsd_ring;
}
 
if (HAS_VEBOX(dev)) {
ret = logical_vebox_ring_init(dev);
if (ret)
goto cleanup_blt_ring;
}
 
if (HAS_BSD2(dev)) {
ret = logical_bsd2_ring_init(dev);
if (ret)
goto cleanup_vebox_ring;
}
 
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_bsd2_ring;
 
return 0;
 
cleanup_bsd2_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring:
intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
cleanup_render_ring:
intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
 
return ret;
}
 
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
 
ret = i915_gem_render_state_prepare(ring, &so);
if (ret)
return ret;
 
if (so.rodata == NULL)
return 0;
 
ret = ring->emit_bb_start(ringbuf,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
 
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
ret = __i915_add_request(ring, file, so.obj, NULL);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
i915_gem_render_state_fini(&so);
return ret;
}
 
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
int ret;
 
if (!ppgtt)
ppgtt = dev_priv->mm.aliasing_ppgtt;
 
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
if (ret) {
DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
return ret;
}
 
ret = i915_gem_object_get_pages(ctx_obj);
if (ret) {
DRM_DEBUG_DRIVER("Could not get object pages\n");
return ret;
}
 
i915_gem_object_pin_pages(ctx_obj);
 
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
page = i915_gem_object_get_page(ctx_obj, 1);
reg_state = kmap_atomic(page);
 
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
* only for the first context restore: on a subsequent save, the GPU will
* recreate this batchbuffer with new values (including all the missing
* MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
if (ring->id == RCS)
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(14);
else
reg_state[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(11);
reg_state[CTX_LRI_HEADER_0] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CONTEXT_CONTROL] = RING_CONTEXT_CONTROL(ring);
reg_state[CTX_CONTEXT_CONTROL+1] =
_MASKED_BIT_ENABLE((1<<3) | MI_RESTORE_INHIBIT);
reg_state[CTX_RING_HEAD] = RING_HEAD(ring->mmio_base);
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
/* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context()
*/
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] =
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
reg_state[CTX_BB_HEAD_U+1] = 0;
reg_state[CTX_BB_HEAD_L] = ring->mmio_base + 0x140;
reg_state[CTX_BB_HEAD_L+1] = 0;
reg_state[CTX_BB_STATE] = ring->mmio_base + 0x110;
reg_state[CTX_BB_STATE+1] = (1<<5);
reg_state[CTX_SECOND_BB_HEAD_U] = ring->mmio_base + 0x11c;
reg_state[CTX_SECOND_BB_HEAD_U+1] = 0;
reg_state[CTX_SECOND_BB_HEAD_L] = ring->mmio_base + 0x114;
reg_state[CTX_SECOND_BB_HEAD_L+1] = 0;
reg_state[CTX_SECOND_BB_STATE] = ring->mmio_base + 0x118;
reg_state[CTX_SECOND_BB_STATE+1] = 0;
if (ring->id == RCS) {
/* TODO: according to BSpec, the register state context
* for CHV does not have these. OTOH, these registers do
* exist in CHV. I'm waiting for a clarification */
reg_state[CTX_BB_PER_CTX_PTR] = ring->mmio_base + 0x1c0;
reg_state[CTX_BB_PER_CTX_PTR+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX] = ring->mmio_base + 0x1c4;
reg_state[CTX_RCS_INDIRECT_CTX+1] = 0;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET] = ring->mmio_base + 0x1c8;
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = 0;
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9);
reg_state[CTX_LRI_HEADER_1] |= MI_LRI_FORCE_POSTED;
reg_state[CTX_CTX_TIMESTAMP] = ring->mmio_base + 0x3a8;
reg_state[CTX_CTX_TIMESTAMP+1] = 0;
reg_state[CTX_PDP3_UDW] = GEN8_RING_PDP_UDW(ring, 3);
reg_state[CTX_PDP3_LDW] = GEN8_RING_PDP_LDW(ring, 3);
reg_state[CTX_PDP2_UDW] = GEN8_RING_PDP_UDW(ring, 2);
reg_state[CTX_PDP2_LDW] = GEN8_RING_PDP_LDW(ring, 2);
reg_state[CTX_PDP1_UDW] = GEN8_RING_PDP_UDW(ring, 1);
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[3]);
reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[3]);
reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[2]);
reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[2]);
reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[1]);
reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[1]);
reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pd_dma_addr[0]);
reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pd_dma_addr[0]);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;
reg_state[CTX_R_PWR_CLK_STATE+1] = 0;
}
 
kunmap_atomic(reg_state);
 
ctx_obj->dirty = 1;
// set_page_dirty(page);
i915_gem_object_unpin_pages(ctx_obj);
 
return 0;
}
 
/**
* intel_lr_context_free() - free the LRC specific bits of a context
* @ctx: the LR context to free.
*
* The real context freeing is done in i915_gem_context_free: this only
* takes care of the bits that are LRC related: the per-engine backing
* objects and the logical ringbuffer.
*/
void intel_lr_context_free(struct intel_context *ctx)
{
int i;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
if (ctx_obj) {
struct intel_ringbuffer *ringbuf =
ctx->engine[i].ringbuf;
struct intel_engine_cs *ring = ringbuf->ring;
 
if (ctx == ring->default_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
}
intel_destroy_ringbuffer_obj(ringbuf);
kfree(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
}
}
}
 
static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
{
int ret = 0;
 
WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
 
switch (ring->id) {
case RCS:
if (INTEL_INFO(ring->dev)->gen >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
break;
case VCS:
case BCS:
case VECS:
case VCS2:
ret = GEN8_LR_CONTEXT_OTHER_SIZE;
break;
}
 
return ret;
}
 
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
/* The status page is offset 0 from the default context object
* in LRC mode. */
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
ring->status_page.page_addr =
kmap(sg_page(default_ctx_obj->pages->sgl));
ring->status_page.obj = default_ctx_obj;
 
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
 
/**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context
* @ctx: LR context to create.
* @ring: engine to be used with the context.
*
* This function can be called more than once, with different engines, if we plan
* to use the context with them. The context backing objects and the ringbuffers
* (especially the ringbuffer backing objects) suck a lot of memory up, and that's why
* the creation is a deferred call: it's better to make sure first that we need to use
* a given ring with the context.
*
* Return: non-zero on error.
*/
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring)
{
const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
 
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
if (ctx->engine[ring->id].state)
return 0;
 
context_size = round_up(get_lr_context_size(ring), 4096);
 
ctx_obj = i915_gem_alloc_context_obj(dev, context_size);
if (IS_ERR(ctx_obj)) {
ret = PTR_ERR(ctx_obj);
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
return ret;
}
 
if (is_global_default_ctx) {
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
}
 
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf) {
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
ring->name);
ret = -ENOMEM;
goto error_unpin_ctx;
}
 
ringbuf->ring = ring;
ringbuf->FIXME_lrc_ctx = ctx;
 
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->effective_size = ringbuf->size;
ringbuf->head = 0;
ringbuf->tail = 0;
ringbuf->space = ringbuf->size;
ringbuf->last_retired_head = -1;
 
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER(
"Failed to allocate ringbuffer obj %s: %d\n",
ring->name, ret);
goto error_free_rbuf;
}
 
if (is_global_default_ctx) {
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
goto error_destroy_rbuf;
}
}
 
}
 
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error;
}
 
ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj;
 
if (ctx == ring->default_context)
lrc_setup_hardware_status_page(ring, ctx_obj);
 
if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
ret = ring->init_context(ring, ctx);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
}
 
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
ctx->rcs_initialized = true;
}
 
return 0;
 
error:
if (is_global_default_ctx)
intel_unpin_ringbuffer_obj(ringbuf);
error_destroy_rbuf:
intel_destroy_ringbuffer_obj(ringbuf);
error_free_rbuf:
kfree(ringbuf);
error_unpin_ctx:
if (is_global_default_ctx)
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
/drivers/video/drm/i915/intel_lrc.h
0,0 → 1,118
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
 
#define GEN8_LR_CONTEXT_ALIGN 4096
 
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
 
/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);
 
int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
* intel_logical_ring_advance() - advance the ringbuffer tail
* @ringbuf: Ringbuffer to advance.
*
* The tail is only updated in our logical ringbuffer struct.
*/
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
ringbuf->tail &= ringbuf->size - 1;
}
/**
* intel_logical_ring_emit() - write a DWORD to the ringbuffer.
* @ringbuf: Ringbuffer to write to.
* @data: DWORD to write.
*/
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
u32 data)
{
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
 
/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
struct intel_context *ctx);
 
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
/**
* struct intel_ctx_submit_request - queued context submission request
* @ctx: Context to submit to the ELSP.
* @ring: Engine to submit it to.
* @tail: how far into the context's ringbuffer this request goes.
* @execlist_link: link in the submission queue.
* @work: workqueue for processing this request in a bottom half.
* @elsp_submitted: no. of times this request has been sent to the ELSP.
*
* The ELSP only accepts two elements at a time, so we queue context/tail
* pairs on a given queue (ring->execlist_queue) until the hardware is
* available. The queue serves a double purpose: we also use it to keep track
* of the up to 2 contexts currently in the hardware (usually one in execution
* and the other queued up by the GPU): We only remove elements from the head
* of the queue when the hardware informs us that an element has been
* completed.
*
* All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
*/
struct intel_ctx_submit_request {
struct intel_context *ctx;
struct intel_engine_cs *ring;
u32 tail;
 
struct list_head execlist_link;
 
int elsp_submitted;
};
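 
/*
 * Illustrative sketch, not part of the driver: every producer touching
 * ring->execlist_queue takes ring->execlist_lock, as described above. The
 * helper below is hypothetical; the real queueing (including ELSP coalescing
 * and the elsp_submitted bookkeeping) lives in execlists_context_queue() in
 * intel_lrc.c.
 */
static inline void intel_execlists_queue_request_example(struct intel_engine_cs *ring,
struct intel_ctx_submit_request *req)
{
unsigned long flags;
 
spin_lock_irqsave(&ring->execlist_lock, flags);
list_add_tail(&req->execlist_link, &ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
}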
 
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
 
#endif /* _INTEL_LRC_H_ */
/drivers/video/drm/i915/intel_lvds.c
76,7 → 76,7
u32 tmp;
 
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;
 
tmp = I915_READ(lvds_encoder->reg);
823,8 → 823,7
struct intel_encoder *encoder;
struct intel_lvds_encoder *lvds_encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
for_each_intel_encoder(dev, encoder) {
if (encoder->type == INTEL_OUTPUT_LVDS) {
lvds_encoder = to_lvds_encoder(&encoder->base);
 
900,6 → 899,17
int pipe;
u8 pin;
 
/*
* Unlock registers and just leave them unlocked. Do this before
* checking quirk lists to avoid bogus WARNINGs.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
if (!intel_lvds_supported(dev))
return;
 
1098,21 → 1108,10
lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
LVDS_A3_POWER_MASK;
 
/*
* Unlock registers and just
* leave them unlocked
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
drm_connector_register(connector);
 
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
intel_panel_setup_backlight(connector, INVALID_PIPE);
 
return;
 
/drivers/video/drm/i915/intel_opregion.c
396,6 → 396,16
return -EINVAL;
}
 
/*
* If the vendor backlight interface is not in use and the ACPI backlight interface
* is broken, do not bother processing backlight change requests from firmware.
*/
static bool should_ignore_backlight_request(void)
{
return acpi_video_backlight_support() &&
!acpi_video_verify_backlight_support();
}
 
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
404,11 → 414,7
 
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
/*
* If the acpi_video interface is not supposed to be used, don't
* bother processing backlight level change requests from firmware.
*/
if (!acpi_video_verify_backlight_support()) {
if (should_ignore_backlight_request()) {
DRM_DEBUG_KMS("opregion backlight request ignored\n");
return 0;
}
/drivers/video/drm/i915/intel_panel.c
419,9 → 419,8
source_val = clamp(source_val, source_min, source_max);
 
/* avoid overflows */
target_val = (uint64_t)(source_val - source_min) *
(target_max - target_min);
do_div(target_val, source_max - source_min);
target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
(target_max - target_min), source_max - source_min);
target_val += target_min;
 
return target_val;
522,6 → 521,9
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return 0;
 
return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
 
537,15 → 539,17
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
unsigned long flags;
struct intel_panel *panel = &connector->panel;
u32 val = 0;
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
 
if (panel->backlight.enabled) {
val = dev_priv->display.get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
}
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_unlock(&dev_priv->backlight_lock);
 
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
604,6 → 608,9
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
627,14 → 634,12
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
 
WARN_ON(panel->backlight.max == 0);
 
644,7 → 649,7
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_unlock(&dev_priv->backlight_lock);
}
 
/* set backlight brightness to level in range [0..max], assuming hw min is
658,12 → 663,17
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 hw_level;
unsigned long flags;
 
/*
* INVALID_PIPE may occur during driver init because
* connection_mutex isn't held across the entire backlight
* setup + modeset readout, and the BIOS can issue the
* requests at any time.
*/
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
 
WARN_ON(panel->backlight.max == 0);
 
674,7 → 684,7
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(connector, hw_level);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_unlock(&dev_priv->backlight_lock);
}
 
static void pch_disable_backlight(struct intel_connector *connector)
716,6 → 726,9
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 tmp;
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
intel_panel_actually_set_backlight(connector, 0);
 
tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
727,10 → 740,8
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
 
/*
744,12 → 755,12
return;
}
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
 
panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector);
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_unlock(&dev_priv->backlight_lock);
}
 
static void bdw_enable_backlight(struct intel_connector *connector)
773,7 → 784,8
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
 
/* BDW always uses the pch pwm controls. */
/* After LPT, override is the default. */
if (HAS_PCH_LPT(dev_priv))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
 
I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
903,6 → 915,9
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 ctl, ctl2;
 
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return;
 
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
DRM_DEBUG_KMS("backlight already enabled\n");
930,14 → 945,13
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
unsigned long flags;
 
if (!panel->backlight.present || pipe == INVALID_PIPE)
if (!panel->backlight.present)
return;
 
DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
 
WARN_ON(panel->backlight.max == 0);
 
948,7 → 962,7
dev_priv->display.enable_backlight(connector);
panel->backlight.enabled = true;
 
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_unlock(&dev_priv->backlight_lock);
}
 
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
955,6 → 969,7
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
struct intel_panel *panel = &connector->panel;
struct drm_device *dev = connector->base.dev;
 
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
999,6 → 1014,9
if (WARN_ON(panel->backlight.device))
return -ENODEV;
 
if (!panel->backlight.present)
return 0;
 
WARN_ON(panel->backlight.max == 0);
 
memset(&props, 0, sizeof(props));
1013,6 → 1031,11
panel->backlight.level,
props.max_brightness);
 
if (panel->backlight.enabled)
props.power = FB_BLANK_UNBLANK;
else
props.power = FB_BLANK_POWERDOWN;
 
/*
* Note: using the same name independent of the connector prevents
* registration of multiple backlight devices in the driver.
1029,6 → 1052,10
panel->backlight.device = NULL;
return -ENODEV;
}
 
DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
connector->base.name);
 
return 0;
}
 
1062,15 → 1089,28
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
int min;
 
WARN_ON(panel->backlight.max == 0);
 
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
* to problems. There are such machines out there. Either our
* interpretation is wrong or the vbt has bogus data. Or both. Safeguard
* against this by letting the minimum be at most (arbitrarily chosen)
* 25% of the max.
*/
min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
if (min != dev_priv->vbt.backlight.min_brightness) {
DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
dev_priv->vbt.backlight.min_brightness, min);
}
 
/* vbt value is a coefficient in range [0..255] */
return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
0, panel->backlight.max);
return scale(min, 0, 255, 0, panel->backlight.max);
}
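 
/*
 * Worked example (hypothetical numbers, for illustration only): a VBT
 * min_brightness of 255 would make min == max, so the clamp above caps the
 * coefficient at 64; with panel->backlight.max == 7812 this gives
 * scale(64, 0, 255, 0, 7812) == 1961, i.e. roughly 25% of the maximum.
 */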
 
static int bdw_setup_backlight(struct intel_connector *connector)
static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1096,7 → 1136,7
return 0;
}
 
static int pch_setup_backlight(struct intel_connector *connector)
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1123,7 → 1163,7
return 0;
}
 
static int i9xx_setup_backlight(struct intel_connector *connector)
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1155,7 → 1195,7
return 0;
}
 
static int i965_setup_backlight(struct intel_connector *connector)
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1185,16 → 1225,16
return 0;
}
 
static int vlv_setup_backlight(struct intel_connector *connector)
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe;
enum pipe p;
u32 ctl, ctl2, val;
 
for_each_pipe(pipe) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
for_each_pipe(dev_priv, p) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
 
/* Skip if the modulation freq is already set */
if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
1201,14 → 1241,17
continue;
 
cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
cur_val);
}
 
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
 
ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
 
ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
return -ENODEV;
1215,7 → 1258,7
 
panel->backlight.min = get_backlight_min_vbt(connector);
 
val = _vlv_get_backlight(dev, PIPE_A);
val = _vlv_get_backlight(dev, pipe);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
 
panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
1224,13 → 1267,12
return 0;
}
 
int intel_panel_setup_backlight(struct drm_connector *connector)
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_panel *panel = &intel_connector->panel;
unsigned long flags;
int ret;
 
if (!dev_priv->vbt.backlight.present) {
1243,9 → 1285,9
}
 
/* set level and max in panel struct */
spin_lock_irqsave(&dev_priv->backlight_lock, flags);
ret = dev_priv->display.setup_backlight(intel_connector);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
mutex_lock(&dev_priv->backlight_lock);
ret = dev_priv->display.setup_backlight(intel_connector, pipe);
mutex_unlock(&dev_priv->backlight_lock);
 
if (ret) {
DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
1253,15 → 1295,12
return ret;
}
 
intel_backlight_device_register(intel_connector);
 
panel->backlight.present = true;
 
DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
"sysfs interface %sregistered\n",
DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->name,
panel->backlight.enabled ? "enabled" : "disabled",
panel->backlight.level, panel->backlight.max,
panel->backlight.device ? "" : "not ");
panel->backlight.level, panel->backlight.max);
 
return 0;
}
1272,7 → 1311,6
struct intel_panel *panel = &intel_connector->panel;
 
panel->backlight.present = false;
intel_backlight_device_unregister(intel_connector);
}
 
/* Set up chip specific backlight functions */
1280,7 → 1318,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_BROADWELL(dev)) {
if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
1335,3 → 1373,19
drm_mode_destroy(intel_connector->base.dev,
panel->downclock_mode);
}
 
void intel_backlight_register(struct drm_device *dev)
{
struct intel_connector *connector;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
intel_backlight_device_register(connector);
}
 
void intel_backlight_unregister(struct drm_device *dev)
{
struct intel_connector *connector;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
intel_backlight_device_unregister(connector);
}
/drivers/video/drm/i915/intel_pm.c
36,21 → 36,8
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
 
#define assert_spin_locked(x)
 
void getrawmonotonic(struct timespec *ts);
 
static inline void outb(u8 v, u16 port)
{
asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
static inline u8 inb(u16 port)
{
u8 v;
asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
return v;
}
 
union ktime {
s64 tv64;
};
95,11 → 82,37
* i915.i915_enable_fbc parameter
*/
 
static void gen9_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* WaDisableSDEUnitClockGating:skl
* This seems to be a pre-production w/a.
*/
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/*
* WaDisableDgMirrorFixInHalfSliceChicken5:skl
* This is a pre-production w/a.
*/
I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
~GEN9_DG_MIRROR_FIX_ENABLE);
 
/* Wa4x4STCOptimizationDisable:skl */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
128,6 → 141,8
int i;
u32 fbc_ctl;
 
dev_priv->fbc.enabled = true;
 
cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
182,6 → 197,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
202,6 → 219,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
253,6 → 272,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
293,6 → 314,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = false;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
319,6 → 342,8
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
 
dev_priv->fbc.enabled = true;
 
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
dev_priv->fbc.threshold++;
338,6 → 363,9
 
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
 
if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
 
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_IVYBRIDGE(dev)) {
365,10 → 393,20
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv->display.fbc_enabled)
return false;
return dev_priv->fbc.enabled;
}
 
return dev_priv->display.fbc_enabled(dev);
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_GEN8(dev))
return;
 
if (!intel_fbc_enabled(dev))
return;
 
I915_WRITE(MSG_FBC_REND_STATE, value);
}
 
static void intel_fbc_work_fn(struct work_struct *__work)
607,6 → 645,12
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
goto out_disable;
}
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
882,7 → 926,7
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
static const int latency_ns = 5000;
static const int pessimal_latency_ns = 5000;
 
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
1011,7 → 1055,7
.guard_size = 2,
.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
static const struct intel_watermark_params i830_a_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM,
.default_wm = 1,
1018,6 → 1062,13
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
.fifo_size = I855GM_FIFO_SIZE,
.max_wm = I915_MAX_WM/2,
.default_wm = 1,
.guard_size = 2,
.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
.fifo_size = I830_FIFO_SIZE,
.max_wm = I915_MAX_WM,
1073,6 → 1124,17
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
 
/*
* Bspec seems to indicate that the value shouldn't be lower than
* 'burst size + 1'. Certainly 830 is quite unhappy with low values.
* Let's go for 8, which is the burst size, since certain platforms
* already use a hardcoded 8 (which is what the spec says should be
* done).
*/
if (wm_size <= 8)
wm_size = 8;
 
return wm_size;
}
 
1297,33 → 1359,32
display, cursor);
}
 
static bool vlv_compute_drain_latency(struct drm_device *dev,
int plane,
int *plane_prec_mult,
int *plane_dl,
int *cursor_prec_mult,
int *cursor_dl)
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
int pixel_size,
int *prec_mult,
int *drain_latency)
{
struct drm_crtc *crtc;
int clock, pixel_size;
struct drm_device *dev = crtc->dev;
int entries;
int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (!intel_crtc_active(crtc))
if (WARN(clock == 0, "Pixel clock is zero!\n"))
return false;
 
clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
return false;
 
entries = (clock / 1000) * pixel_size;
*plane_prec_mult = (entries > 128) ?
DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
if (IS_CHERRYVIEW(dev))
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
DRAIN_LATENCY_PRECISION_16;
else
*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
DRAIN_LATENCY_PRECISION_32;
*drain_latency = (64 * (*prec_mult) * 4) / entries;
 
entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
*cursor_prec_mult = (entries > 128) ?
DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
if (*drain_latency > DRAIN_LATENCY_MASK)
*drain_latency = DRAIN_LATENCY_MASK;
 
return true;
}
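 
/*
 * Worked example (hypothetical numbers, for illustration only): with a
 * 148500 kHz pixel clock and 32bpp (pixel_size == 4), entries =
 * DIV_ROUND_UP(148500, 1000) * 4 = 596. Since 596 > 128 the high precision
 * multiplier is selected (64 on VLV, 32 on CHV), so on VLV drain_latency =
 * (64 * 64 * 4) / 596 = 27 (the result is clamped to DRAIN_LATENCY_MASK if
 * it overflows).
 */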
1336,39 → 1397,51
* latency value.
*/
 
static void vlv_update_drain_latency(struct drm_device *dev)
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_prec, planea_dl, planeb_prec, planeb_dl;
int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
either 16 or 32 */
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pixel_size;
int drain_latency;
enum pipe pipe = intel_crtc->pipe;
int plane_prec, prec_mult, plane_dl;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
 
/* For plane A, Cursor A */
if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
&cursor_prec_mult, &cursora_dl)) {
cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
(DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
 
I915_WRITE(VLV_DDL1, cursora_prec |
(cursora_dl << DDL_CURSORA_SHIFT) |
planea_prec | planea_dl);
if (!intel_crtc_active(crtc)) {
I915_WRITE(VLV_DDL(pipe), plane_dl);
return;
}
 
/* For plane B, Cursor B */
if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
&cursor_prec_mult, &cursorb_dl)) {
cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
/* Primary plane Drain Latency */
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_PLANE_PRECISION_HIGH :
DDL_PLANE_PRECISION_LOW;
plane_dl |= plane_prec | drain_latency;
}
 
I915_WRITE(VLV_DDL2, cursorb_prec |
(cursorb_dl << DDL_CURSORB_SHIFT) |
planeb_prec | planeb_dl);
/* Cursor Drain Latency
* BPP is always 4 for cursor
*/
pixel_size = 4;
 
/* Program cursor DL only if it is enabled */
if (intel_crtc->cursor_base &&
vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_CURSOR_PRECISION_HIGH :
DDL_CURSOR_PRECISION_LOW;
plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
}
 
I915_WRITE(VLV_DDL(pipe), plane_dl);
}
 
#define single_plane_enabled(mask) is_power_of_2(mask)
1384,17 → 1457,17
unsigned int enabled = 0;
bool cxsr_enabled;
 
vlv_update_drain_latency(dev);
vlv_update_drain_latency(crtc);
 
if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
 
if (g4x_compute_wm0(dev, PIPE_B,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
 
1416,7 → 1489,8
plane_sr = cursor_sr = 0;
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
"B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
1425,7 → 1499,7
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
(planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
1437,6 → 1511,118
intel_set_memory_cxsr(dev_priv, true);
}
 
static void cherryview_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, planec_wm;
int cursora_wm, cursorb_wm, cursorc_wm;
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
bool cxsr_enabled;
 
vlv_update_drain_latency(crtc);
 
if (g4x_compute_wm0(dev, PIPE_A,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
 
if (g4x_compute_wm0(dev, PIPE_B,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
 
if (g4x_compute_wm0(dev, PIPE_C,
&valleyview_wm_info, pessimal_latency_ns,
&valleyview_cursor_wm_info, pessimal_latency_ns,
&planec_wm, &cursorc_wm))
enabled |= 1 << PIPE_C;
 
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&plane_sr, &ignore_cursor_sr) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
2*sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
cxsr_enabled = true;
} else {
cxsr_enabled = false;
intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
"B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
"SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
planec_wm, cursorc_wm,
plane_sr, cursor_sr);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
(planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
I915_WRITE(DSPFW9_CHV,
(I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
DSPFW_CURSORC_MASK)) |
(planec_wm << DSPFW_PLANEC_SHIFT) |
(cursorc_wm << DSPFW_CURSORC_SHIFT));
 
if (cxsr_enabled)
intel_set_memory_cxsr(dev_priv, true);
}
 
static void valleyview_update_sprite_wm(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_plane(plane)->pipe;
int sprite = to_intel_plane(plane)->plane;
int drain_latency;
int plane_prec;
int sprite_dl;
int prec_mult;
const int high_precision = IS_CHERRYVIEW(dev) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
 
sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
(DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
 
if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
&drain_latency)) {
plane_prec = (prec_mult == high_precision) ?
DDL_SPRITE_PRECISION_HIGH(sprite) :
DDL_SPRITE_PRECISION_LOW(sprite);
sprite_dl |= plane_prec |
(drain_latency << DDL_SPRITE_SHIFT(sprite));
}
 
I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
 
static void g4x_update_wm(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
1448,14 → 1634,14
bool cxsr_enabled;
 
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1 << PIPE_A;
 
if (g4x_compute_wm0(dev, PIPE_B,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&g4x_wm_info, pessimal_latency_ns,
&g4x_cursor_wm_info, pessimal_latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 1 << PIPE_B;
 
1472,7 → 1658,8
plane_sr = cursor_sr = 0;
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
"B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
1481,7 → 1668,7
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
(planea_wm << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
1555,8 → 1742,11
 
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
(8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
(8 << DSPFW_CURSORB_SHIFT) |
(8 << DSPFW_PLANEB_SHIFT) |
(8 << DSPFW_PLANEA_SHIFT));
I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
(8 << DSPFW_PLANEC_SHIFT_OLD));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 
1581,7 → 1771,7
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
wm_info = &i830_wm_info;
wm_info = &i830_a_wm_info;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
1594,11 → 1784,17
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
pessimal_latency_ns);
enabled = crtc;
} else
} else {
planea_wm = fifo_size - wm_info->guard_size;
if (planea_wm > (long)wm_info->max_wm)
planea_wm = wm_info->max_wm;
}
 
if (IS_GEN2(dev))
wm_info = &i830_bc_wm_info;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (intel_crtc_active(crtc)) {
1610,13 → 1806,16
adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
latency_ns);
pessimal_latency_ns);
if (enabled == NULL)
enabled = crtc;
else
enabled = NULL;
} else
} else {
planeb_wm = fifo_size - wm_info->guard_size;
if (planeb_wm > (long)wm_info->max_wm)
planeb_wm = wm_info->max_wm;
}
 
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
1703,7 → 1902,7
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
&i845_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
4, latency_ns);
4, pessimal_latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
 
1780,6 → 1979,14
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
 
struct skl_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
uint32_t pixel_rate; /* in KHz */
struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
struct intel_plane_wm_parameters cursor;
};
 
struct ilk_pipe_wm_parameters {
bool active;
uint32_t pipe_htotal;
2091,11 → 2298,82
PIPE_WM_LINETIME_TIME(linetime);
}
 
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
if (IS_GEN9(dev)) {
uint32_t val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev);
 
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
mutex_unlock(&dev_priv->rps.hw_lock);
 
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
}
 
wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
 
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
mutex_lock(&dev_priv->rps.hw_lock);
ret = sandybridge_pcode_read(dev_priv,
GEN9_PCODE_READ_MEM_LATENCY,
&val);
mutex_unlock(&dev_priv->rps.hw_lock);
if (ret) {
DRM_ERROR("SKL Mailbox read error = %d\n", ret);
return;
}
 
wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
GEN9_MEM_LATENCY_LEVEL_MASK;
 
/*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from
* the punit.
* - W0 is a bit special in that it's the only level that
* can't be disabled if we want to have display working, so
* we always add 2us there.
* - For levels >=1, punit returns 0us latency when they are
* disabled, so we respect that and don't add 2us then
*
* Additionally, if a level n (n > 1) has a 0us latency, all
* levels m (m >= n) need to be disabled. We make sure to
* sanitize the values out of the punit to satisfy this
* requirement.
*/
wm[0] += 2;
for (level = 1; level <= max_level; level++)
if (wm[level] != 0)
wm[level] += 2;
else {
for (i = level + 1; i <= max_level; i++)
wm[i] = 0;
 
break;
}
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
wm[0] = (sskpd >> 56) & 0xFF;
2143,7 → 2421,9
int ilk_wm_max_level(const struct drm_device *dev)
{
/* how many WM levels are we expecting */
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
if (IS_GEN9(dev))
return 7;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
return 4;
else if (INTEL_INFO(dev)->gen >= 6)
return 3;
2153,7 → 2433,7
 
static void intel_print_wm_latency(struct drm_device *dev,
const char *name,
const uint16_t wm[5])
const uint16_t wm[8])
{
int level, max_level = ilk_wm_max_level(dev);
 
2166,8 → 2446,13
continue;
}
 
/* WM1+ latency values in 0.5us units */
if (level > 0)
/*
* - latencies are in us on gen9.
* - before then, WM1+ latency values are in 0.5us units
*/
if (IS_GEN9(dev))
latency *= 10;
else if (level > 0)
latency *= 5;
 
DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2235,6 → 2520,14
snb_wm_latency_quirk(dev);
}
 
static void skl_setup_wm_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
 
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
struct ilk_pipe_wm_parameters *p)
{
2556,7 → 2849,7
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
 
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
const struct ilk_wm_values *old,
const struct ilk_wm_values *new)
{
2564,7 → 2857,7
enum pipe pipe;
int wm_lp;
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
dirty |= WM_DIRTY_LINETIME(pipe);
/* Must disable LP1+ watermarks too */
2650,7 → 2943,7
unsigned int dirty;
uint32_t val;
 
dirty = ilk_compute_wm_dirty(dev, previous, results);
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
return;
 
2725,6 → 3018,769
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
 
/*
* On gen9, we need to allocate Display Data Buffer (DDB) portions to the
* different active planes.
*/
 
#define SKL_DDB_SIZE 896 /* in blocks */
 
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct drm_crtc *for_crtc,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
struct skl_ddb_entry *alloc /* out */)
{
struct drm_crtc *crtc;
unsigned int pipe_size, ddb_size;
int nth_active_pipe;
 
if (!params->active) {
alloc->start = 0;
alloc->end = 0;
return;
}
 
ddb_size = SKL_DDB_SIZE;
 
ddb_size -= 4; /* 4 blocks for bypass path allocation */
 
nth_active_pipe = 0;
for_each_crtc(dev, crtc) {
if (!intel_crtc_active(crtc))
continue;
 
if (crtc == for_crtc)
break;
 
nth_active_pipe++;
}
 
pipe_size = ddb_size / config->num_pipes_active;
alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
alloc->end = alloc->start + pipe_size;
}
 
static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
{
if (config->num_pipes_active == 1)
return 32;
 
return 8;
}
 
static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
entry->start = reg & 0x3ff;
entry->end = (reg >> 16) & 0x3ff;
if (entry->end)
entry->end += 1;
}
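/*
 * Illustrative decode example (hypothetical register value): a
 * PLANE_BUF_CFG value of 0x01bf0140 yields start = 0x140 = 320 and
 * end = 0x1bf + 1 = 448, i.e. blocks [320, 448) in the exclusive-end
 * convention used by skl_ddb_entry. The matching encode in
 * skl_ddb_entry_write() further down writes the inclusive end back as
 * (448 - 1) << 16 | 320 = 0x01bf0140.
 */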
 
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
int plane;
u32 val;
 
for_each_pipe(dev_priv, pipe) {
for_each_plane(pipe, plane) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
val);
}
 
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
}
}
 
static unsigned int
skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
{
return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
}
 
/*
* We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
* an 8192x4096@32bpp framebuffer:
* 3 * 4096 * 8192 * 4 < 2^32
*/
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
const struct skl_pipe_wm_parameters *params)
{
unsigned int total_data_rate = 0;
int plane;
 
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
 
p = &params->plane[plane];
if (!p->enabled)
continue;
 
total_data_rate += skl_plane_relative_data_rate(p);
}
 
return total_data_rate;
}
 
static void
skl_allocate_pipe_ddb(struct drm_crtc *crtc,
const struct intel_wm_config *config,
const struct skl_pipe_wm_parameters *params,
struct skl_ddb_allocation *ddb /* out */)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
uint16_t alloc_size, start, cursor_blocks;
unsigned int total_data_rate;
int plane;
 
skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0) {
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
return;
}
 
cursor_blocks = skl_cursor_allocation(config);
ddb->cursor[pipe].start = alloc->end - cursor_blocks;
ddb->cursor[pipe].end = alloc->end;
 
alloc_size -= cursor_blocks;
alloc->end -= cursor_blocks;
 
/*
* Each active plane gets a portion of the remaining space, in
* proportion to the amount of data it needs to fetch from memory.
*
* FIXME: we may not allocate every single block here.
*/
total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
 
start = alloc->start;
for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
const struct intel_plane_wm_parameters *p;
unsigned int data_rate;
uint16_t plane_blocks;
 
p = &params->plane[plane];
if (!p->enabled)
continue;
 
data_rate = skl_plane_relative_data_rate(p);
 
/*
* Promote the expression to 64 bits to avoid overflow; the
* result is < available, since data_rate / total_data_rate < 1.
*/
plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
 
ddb->plane[pipe][plane].start = start;
ddb->plane[pipe][plane].end = start + plane_blocks;
 
start += plane_blocks;
}
 
}
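/*
 * Worked allocation example (illustrative numbers): with two active
 * pipes, ddb_size = 896 - 4 = 892 and each pipe gets 892 / 2 = 446
 * blocks. The cursor takes skl_cursor_allocation() = 8 blocks, leaving
 * alloc_size = 438. If the primary fetches 1920x1080 at 4 Bpp
 * (data_rate = 8294400) and one sprite fetches 1920x540 at 4 Bpp
 * (data_rate = 4147200), total_data_rate = 12441600 and the split is
 *   primary: 438 * 8294400 / 12441600 = 292 blocks
 *   sprite:  438 * 4147200 / 12441600 = 146 blocks
 * which happens to use every remaining block in this case.
 */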
 
static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
{
/* TODO: Take into account the scalers once we support them */
return config->adjusted_mode.crtc_clock;
}
 
/*
* The max latency should be 257 (max the punit can code is 255 and we add 2us
* for the read latency) and bytes_per_pixel should always be <= 8, so that
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint32_t wm_intermediate_val, ret;
 
if (latency == 0)
return UINT_MAX;
 
wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
 
return ret;
}
 
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
 
if (latency == 0)
return UINT_MAX;
 
plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
wm_intermediate_val = latency * pixel_rate;
ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
plane_bytes_per_line;
 
return ret;
}
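/*
 * Example of the two methods (illustrative; assumes pixel_rate =
 * 148500 kHz, 4 bytes per pixel, 1920 horizontal pixels, pipe_htotal =
 * 2200 and a 5us latency level):
 *   method1 = DIV_ROUND_UP(5 * 148500 * 4, 1000)           = 2970 bytes
 *   method2 = DIV_ROUND_UP(5 * 148500, 2200 * 1000) * 7680  = 7680 bytes
 * method1 is the bytes fetched during the latency window, method2 is
 * that window rounded up to whole lines; skl_compute_plane_wm() below
 * takes min(method1, method2) when at least one full line fits in the
 * plane's DDB allocation, and method1 otherwise.
 */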
 
static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
const struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
enum pipe pipe = intel_crtc->pipe;
 
if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
sizeof(new_ddb->plane[pipe])))
return true;
 
if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
sizeof(new_ddb->cursor[pipe])))
return true;
 
return false;
}
 
static void skl_compute_wm_global_parameters(struct drm_device *dev,
struct intel_wm_config *config)
{
struct drm_crtc *crtc;
struct drm_plane *plane;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
config->num_pipes_active += intel_crtc_active(crtc);
 
/* FIXME: I don't think we need those two global parameters on SKL */
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
config->sprites_enabled |= intel_plane->wm.enabled;
config->sprites_scaled |= intel_plane->wm.scaled;
}
}
 
static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *p)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct drm_plane *plane;
int i = 1; /* Index for sprite planes start */
 
p->active = intel_crtc_active(crtc);
if (p->active) {
p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
 
/*
* For now, assume primary and cursor planes are always enabled.
*/
p->plane[0].enabled = true;
p->plane[0].bytes_per_pixel =
crtc->primary->fb->bits_per_pixel / 8;
p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
 
p->cursor.enabled = true;
p->cursor.bytes_per_pixel = 4;
p->cursor.horiz_pixels = intel_crtc->cursor_width ?
intel_crtc->cursor_width : 64;
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (intel_plane->pipe == pipe)
p->plane[i++] = intel_plane->wm;
}
}
 
static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
struct intel_plane_wm_parameters *p_params,
uint16_t ddb_allocation,
uint32_t mem_value,
uint16_t *out_blocks, /* out */
uint8_t *out_lines /* out */)
{
uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
uint32_t result_bytes;
 
if (mem_value == 0 || !p->active || !p_params->enabled)
return false;
 
method1 = skl_wm_method1(p->pixel_rate,
p_params->bytes_per_pixel,
mem_value);
method2 = skl_wm_method2(p->pixel_rate,
p->pipe_htotal,
p_params->horiz_pixels,
p_params->bytes_per_pixel,
mem_value);
 
plane_bytes_per_line = p_params->horiz_pixels *
p_params->bytes_per_pixel;
 
/* For now xtile and linear */
if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
result_bytes = min(method1, method2);
else
result_bytes = method1;
 
res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
 
if (res_blocks > ddb_allocation || res_lines > 31)
return false;
 
*out_blocks = res_blocks;
*out_lines = res_lines;
 
return true;
}
 
static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *p,
enum pipe pipe,
int level,
int num_planes,
struct skl_wm_level *result)
{
uint16_t latency = dev_priv->wm.skl_latency[level];
uint16_t ddb_blocks;
int i;
 
for (i = 0; i < num_planes; i++) {
ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
 
result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
ddb_blocks,
latency,
&result->plane_res_b[i],
&result->plane_res_l[i]);
}
 
ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
latency, &result->cursor_res_b,
&result->cursor_res_l);
}
 
static uint32_t
skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
{
if (!intel_crtc_active(crtc))
return 0;
 
return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
 
}
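/*
 * Linetime example (illustrative): with pipe_htotal = 2200 and
 * pixel_rate = 148500 kHz, a scanline takes 2200 / 148500 kHz = 14.81us,
 * and DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, i.e. the line time
 * expressed in 0.125us units.
 */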
 
static void skl_compute_transition_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
struct skl_wm_level *trans_wm /* out */)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i;
 
if (!params->active)
return;
 
/* Until we know more, just disable transition WMs */
for (i = 0; i < intel_num_planes(intel_crtc); i++)
trans_wm->plane_en[i] = false;
trans_wm->cursor_en = false;
}
 
static void skl_compute_pipe_wm(struct drm_crtc *crtc,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm_parameters *params,
struct skl_pipe_wm *pipe_wm)
{
struct drm_device *dev = crtc->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int level, max_level = ilk_wm_max_level(dev);
 
for (level = 0; level <= max_level; level++) {
skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
level, intel_num_planes(intel_crtc),
&pipe_wm->wm[level]);
}
pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
 
skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
}
 
static void skl_compute_wm_results(struct drm_device *dev,
struct skl_pipe_wm_parameters *p,
struct skl_pipe_wm *p_wm,
struct skl_wm_values *r,
struct intel_crtc *intel_crtc)
{
int level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = intel_crtc->pipe;
uint32_t temp;
int i;
 
for (level = 0; level <= max_level; level++) {
for (i = 0; i < intel_num_planes(intel_crtc); i++) {
temp = 0;
 
temp |= p_wm->wm[level].plane_res_l[i] <<
PLANE_WM_LINES_SHIFT;
temp |= p_wm->wm[level].plane_res_b[i];
if (p_wm->wm[level].plane_en[i])
temp |= PLANE_WM_EN;
 
r->plane[pipe][i][level] = temp;
}
 
temp = 0;
 
temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
temp |= p_wm->wm[level].cursor_res_b;
 
if (p_wm->wm[level].cursor_en)
temp |= PLANE_WM_EN;
 
r->cursor[pipe][level] = temp;
 
}
 
/* transition WMs */
for (i = 0; i < intel_num_planes(intel_crtc); i++) {
temp = 0;
temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
temp |= p_wm->trans_wm.plane_res_b[i];
if (p_wm->trans_wm.plane_en[i])
temp |= PLANE_WM_EN;
 
r->plane_trans[pipe][i] = temp;
}
 
temp = 0;
temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
temp |= p_wm->trans_wm.cursor_res_b;
if (p_wm->trans_wm.cursor_en)
temp |= PLANE_WM_EN;
 
r->cursor_trans[pipe] = temp;
 
r->wm_linetime[pipe] = p_wm->linetime;
}
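/*
 * Packing example (symbolic, illustrative): a level with res_blocks = 7,
 * res_lines = 1 and the plane enabled is stored as
 *   PLANE_WM_EN | (1 << PLANE_WM_LINES_SHIFT) | 7
 * and skl_pipe_wm_active_state() further down reverses exactly this
 * packing when reading the hardware state back out.
 */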
 
static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
const struct skl_ddb_entry *entry)
{
if (entry->end)
I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
else
I915_WRITE(reg, 0);
}
 
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
const struct skl_wm_values *new)
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
int i, level, max_level = ilk_wm_max_level(dev);
enum pipe pipe = crtc->pipe;
 
if (!new->dirty[pipe])
continue;
 
I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
 
for (level = 0; level <= max_level; level++) {
for (i = 0; i < intel_num_planes(crtc); i++)
I915_WRITE(PLANE_WM(pipe, i, level),
new->plane[pipe][i][level]);
I915_WRITE(CUR_WM(pipe, level),
new->cursor[pipe][level]);
}
for (i = 0; i < intel_num_planes(crtc); i++)
I915_WRITE(PLANE_WM_TRANS(pipe, i),
new->plane_trans[pipe][i]);
I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
 
for (i = 0; i < intel_num_planes(crtc); i++)
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, i),
&new->ddb.plane[pipe][i]);
 
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
&new->ddb.cursor[pipe]);
}
}
 
/*
* When setting up a new DDB allocation arrangement, we need to correctly
* sequence the times at which the new allocations for the pipes are taken into
* account or we'll have pipes fetching from space previously allocated to
* another pipe.
*
* Roughly the sequence looks like:
* 1. re-allocate the pipe(s) with the allocation being reduced and not
* overlapping with a previous light-up pipe (another way to put it is:
* pipes with their new allocation strictly included into their old ones).
* 2. re-allocate the other pipes that get their allocation reduced
* 3. allocate the pipes having their allocation increased
*
* Steps 1. and 2. are here to take care of the following case:
* - Initially DDB looks like this:
* | B | C |
* - enable pipe A.
* - pipe B has a reduced DDB allocation that overlaps with the old pipe C
* allocation
* | A | B | C |
*
* We need to sequence the re-allocation: C, B, A (and not B, C, A).
*/
 
static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
struct drm_device *dev = dev_priv->dev;
int plane;
 
DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
 
for_each_plane(pipe, plane) {
I915_WRITE(PLANE_SURF(pipe, plane),
I915_READ(PLANE_SURF(pipe, plane)));
}
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}
 
static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
const struct skl_ddb_allocation *new,
enum pipe pipe)
{
uint16_t old_size, new_size;
 
old_size = skl_ddb_entry_size(&old->pipe[pipe]);
new_size = skl_ddb_entry_size(&new->pipe[pipe]);
 
return old_size != new_size &&
new->pipe[pipe].start >= old->pipe[pipe].start &&
new->pipe[pipe].end <= old->pipe[pipe].end;
}
 
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
struct skl_wm_values *new_values)
{
struct drm_device *dev = dev_priv->dev;
struct skl_ddb_allocation *cur_ddb, *new_ddb;
bool reallocated[I915_MAX_PIPES] = {false, false, false};
struct intel_crtc *crtc;
enum pipe pipe;
 
new_ddb = &new_values->ddb;
cur_ddb = &dev_priv->wm.skl_hw.ddb;
 
/*
* First pass: flush the pipes with the new allocation contained into
* the old space.
*
* We'll wait for the vblank on those pipes to ensure we can safely
* re-allocate the freed space without this pipe fetching from it.
*/
for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
 
pipe = crtc->pipe;
 
if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
continue;
 
skl_wm_flush_pipe(dev_priv, pipe, 1);
intel_wait_for_vblank(dev, pipe);
 
reallocated[pipe] = true;
}
 
 
/*
* Second pass: flush the pipes that are having their allocation
* reduced, but overlapping with a previous allocation.
*
* Here as well we need to wait for the vblank to make sure the freed
* space is not used anymore.
*/
for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
 
pipe = crtc->pipe;
 
if (reallocated[pipe])
continue;
 
if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
skl_wm_flush_pipe(dev_priv, pipe, 2);
intel_wait_for_vblank(dev, pipe);
}
 
reallocated[pipe] = true;
}
 
/*
* Third pass: flush the pipes that got more space allocated.
*
* We don't need to actively wait for the update here, next vblank
* will just get more DDB space with the correct WM values.
*/
for_each_intel_crtc(dev, crtc) {
if (!crtc->active)
continue;
 
pipe = crtc->pipe;
 
/*
* At this point, only the pipes with more space than before are
* left to re-allocate.
*/
if (reallocated[pipe])
continue;
 
skl_wm_flush_pipe(dev_priv, pipe, 3);
}
}
 
static bool skl_update_pipe_wm(struct drm_crtc *crtc,
struct skl_pipe_wm_parameters *params,
struct intel_wm_config *config,
struct skl_ddb_allocation *ddb, /* out */
struct skl_pipe_wm *pipe_wm /* out */)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
skl_compute_wm_pipe_parameters(crtc, params);
skl_allocate_pipe_ddb(crtc, config, params, ddb);
skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
 
if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
return false;
 
intel_crtc->wm.skl_active = *pipe_wm;
return true;
}
 
static void skl_update_other_pipe_wm(struct drm_device *dev,
struct drm_crtc *crtc,
struct intel_wm_config *config,
struct skl_wm_values *r)
{
struct intel_crtc *intel_crtc;
struct intel_crtc *this_crtc = to_intel_crtc(crtc);
 
/*
* If the WM update hasn't changed the allocation for this_crtc (the
* crtc we are currently computing the new WM values for), other
* enabled crtcs will keep the same allocation and we don't need to
* recompute anything for them.
*/
if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
return;
 
/*
* Otherwise, because of this_crtc being freshly enabled/disabled, the
* other active pipes need new DDB allocation and WM values.
*/
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
struct skl_pipe_wm_parameters params = {};
struct skl_pipe_wm pipe_wm = {};
bool wm_changed;
 
if (this_crtc->pipe == intel_crtc->pipe)
continue;
 
if (!intel_crtc->active)
continue;
 
wm_changed = skl_update_pipe_wm(&intel_crtc->base,
&params, config,
&r->ddb, &pipe_wm);
 
/*
* If we end up re-computing the other pipe WM values, it's
* because it was really needed, so we expect the WM values to
* be different.
*/
WARN_ON(!wm_changed);
 
skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
r->dirty[intel_crtc->pipe] = true;
}
}
 
static void skl_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_pipe_wm_parameters params = {};
struct skl_wm_values *results = &dev_priv->wm.skl_results;
struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {};
 
memset(results, 0, sizeof(*results));
 
skl_compute_wm_global_parameters(dev, &config);
 
if (!skl_update_pipe_wm(crtc, &params, &config,
&results->ddb, &pipe_wm))
return;
 
skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
results->dirty[intel_crtc->pipe] = true;
 
skl_update_other_pipe_wm(dev, crtc, &config, results);
skl_write_wm_values(dev_priv, results);
skl_flush_wm_values(dev_priv, results);
 
/* store the new configuration */
dev_priv->wm.skl_hw = *results;
}
 
static void
skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enabled, bool scaled)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
 
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
 
skl_update_wm(crtc);
}
 
static void ilk_update_wm(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2799,6 → 3855,113
ilk_update_wm(crtc);
}
 
static void skl_pipe_wm_active_state(uint32_t val,
struct skl_pipe_wm *active,
bool is_transwm,
bool is_cursor,
int i,
int level)
{
bool is_enabled = (val & PLANE_WM_EN) != 0;
 
if (!is_transwm) {
if (!is_cursor) {
active->wm[level].plane_en[i] = is_enabled;
active->wm[level].plane_res_b[i] =
val & PLANE_WM_BLOCKS_MASK;
active->wm[level].plane_res_l[i] =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
} else {
active->wm[level].cursor_en = is_enabled;
active->wm[level].cursor_res_b =
val & PLANE_WM_BLOCKS_MASK;
active->wm[level].cursor_res_l =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
}
} else {
if (!is_cursor) {
active->trans_wm.plane_en[i] = is_enabled;
active->trans_wm.plane_res_b[i] =
val & PLANE_WM_BLOCKS_MASK;
active->trans_wm.plane_res_l[i] =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
} else {
active->trans_wm.cursor_en = is_enabled;
active->trans_wm.cursor_res_b =
val & PLANE_WM_BLOCKS_MASK;
active->trans_wm.cursor_res_l =
(val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
}
}
}
 
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
enum pipe pipe = intel_crtc->pipe;
int level, i, max_level;
uint32_t temp;
 
max_level = ilk_wm_max_level(dev);
 
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
 
for (level = 0; level <= max_level; level++) {
for (i = 0; i < intel_num_planes(intel_crtc); i++)
hw->plane[pipe][i][level] =
I915_READ(PLANE_WM(pipe, i, level));
hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
}
 
for (i = 0; i < intel_num_planes(intel_crtc); i++)
hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
 
if (!intel_crtc_active(crtc))
return;
 
hw->dirty[pipe] = true;
 
active->linetime = hw->wm_linetime[pipe];
 
for (level = 0; level <= max_level; level++) {
for (i = 0; i < intel_num_planes(intel_crtc); i++) {
temp = hw->plane[pipe][i][level];
skl_pipe_wm_active_state(temp, active, false,
false, i, level);
}
temp = hw->cursor[pipe][level];
skl_pipe_wm_active_state(temp, active, false, true, i, level);
}
 
for (i = 0; i < intel_num_planes(intel_crtc); i++) {
temp = hw->plane_trans[pipe][i];
skl_pipe_wm_active_state(temp, active, true, false, i, 0);
}
 
temp = hw->cursor_trans[pipe];
skl_pipe_wm_active_state(temp, active, true, true, i, 0);
}
 
void skl_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_crtc *crtc;
 
skl_ddb_get_hw_state(dev_priv, ddb);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
skl_pipe_wm_get_hw_state(crtc);
}
 
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3307,7 → 4470,7
dev_priv->rps.min_freq_softlimit);
 
if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
& GENFREQSTATUS) == 0, 5))
& GENFREQSTATUS) == 0, 100))
DRM_ERROR("timed out waiting for Punit\n");
 
vlv_force_gfx_clock(dev_priv, false);
3356,10 → 4519,9
WARN_ON(val > dev_priv->rps.max_freq_softlimit);
WARN_ON(val < dev_priv->rps.min_freq_softlimit);
 
DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
dev_priv->rps.cur_freq,
vlv_gpu_freq(dev_priv, val), val);
if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
"Odd GPU freq value\n"))
val &= ~1;
 
if (val != dev_priv->rps.cur_freq)
vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3370,45 → 4532,13
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
 
static void gen8_disable_rps_interrupts(struct drm_device *dev)
static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
* leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
* gen8_enable_rps will clean up. */
 
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
I915_WRITE(GEN6_RC_CONTROL, 0);
}
 
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
~dev_priv->pm_rps_events);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->irq_lock);
 
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
 
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3415,11 → 4545,6
 
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
 
if (IS_BROADWELL(dev))
gen8_disable_rps_interrupts(dev);
else
gen6_disable_rps_interrupts(dev);
}
 
static void cherryview_disable_rps(struct drm_device *dev)
3427,8 → 4552,6
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen8_disable_rps_interrupts(dev);
}
 
static void valleyview_disable_rps(struct drm_device *dev)
3435,9 → 4558,13
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* We're doing forcewake before disabling RC6; this is
* what the BIOS expects when going into suspend. */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
I915_WRITE(GEN6_RC_CONTROL, 0);
 
gen6_disable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3448,10 → 4575,15
else
mode = 0;
}
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
if (HAS_RC6p(dev))
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 
else
DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
(mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
}
 
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3468,7 → 4600,7
if (enable_rc6 >= 0) {
int mask;
 
if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
if (HAS_RC6p(dev))
mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
INTEL_RC6pp_ENABLE;
else
3496,54 → 4628,92
return i915.enable_rc6;
}
 
static void gen8_enable_rps_interrupts(struct drm_device *dev)
static void gen6_init_rps_frequencies(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t rp_state_cap;
u32 ddcc_status = 0;
int ret;
 
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
/* All of these values are in units of 50MHz */
dev_priv->rps.cur_freq = 0;
/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
/* XXX: only BYT has a special efficient freq */
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = sandybridge_pcode_read(dev_priv,
HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
&ddcc_status);
if (0 == ret)
dev_priv->rps.efficient_freq =
(ddcc_status >> 8) & 0xff;
}
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
if (dev_priv->rps.min_freq_softlimit == 0)
dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
if (dev_priv->rps.min_freq_softlimit == 0) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dev_priv->rps.min_freq_softlimit =
/* max(RPe, 450 MHz) */
max(dev_priv->rps.efficient_freq, (u8) 9);
else
dev_priv->rps.min_freq_softlimit =
dev_priv->rps.min_freq;
}
}
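/*
 * RP_STATE_CAP decode example (illustrative value): rp_state_cap =
 * 0x070d16 gives RP0 = 0x16 = 22, RP1 = 0x0d = 13 and RPn = 0x07 = 7,
 * i.e. 1100 MHz, 650 MHz and 350 MHz in the 50 MHz units used here.
 * On HSW/BDW the soft minimum then becomes max(efficient_freq, 9), so
 * it never drops below 9 * 50 = 450 MHz.
 */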
 
static void gen9_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0;
int unused;
 
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
 
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
/* 3a: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 
}
 
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
uint32_t rc6_mask = 0, rp_state_cap;
uint32_t rc6_mask = 0;
int unused;
 
/* 1a: Software RC state - RC0 */
3556,8 → 4726,8
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
parse_rp_state_cap(dev_priv, rp_state_cap);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
 
/* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3615,10 → 4785,9
 
/* 6: Ring frequency + overclocking (our driver does this later) */
 
gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
gen8_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
3626,8 → 4795,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
3651,11 → 4818,9
 
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
/* Initialize rps frequencies */
gen6_init_rps_frequencies(dev);
 
parse_rp_state_cap(dev_priv, rp_state_cap);
 
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
3717,8 → 4882,6
dev_priv->rps.power = HIGH_POWER; /* force a reset */
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
gen6_enable_rps_interrupts(dev);
 
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
3766,9 → 4929,9
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
gpu_freq--) {
int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
int diff = dev_priv->rps.max_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
 
if (INTEL_INFO(dev)->gen >= 8) {
3923,6 → 5086,7
 
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
(gtt->stolen_size - pctx_size));
 
3929,6 → 5093,8
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
}
 
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
 
static void valleyview_setup_pctx(struct drm_device *dev)
3954,6 → 5120,8
goto out;
}
 
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
 
/*
* From the Gunit register HAS:
* The Gfx driver is expected to program this register and ensure
3972,6 → 5140,7
I915_WRITE(VLV_PCBR, pctx_paddr);
 
out:
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
dev_priv->vlv_pctx = pctx;
}
 
3989,11 → 5158,27
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
valleyview_setup_pctx(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4028,11 → 5213,41
static void cherryview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
cherryview_setup_pctx(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->dpio_lock);
 
switch ((val >> 2) & 0x7) {
case 0:
case 1:
dev_priv->rps.cz_freq = 200;
dev_priv->mem_freq = 1600;
break;
case 2:
dev_priv->rps.cz_freq = 267;
dev_priv->mem_freq = 1600;
break;
case 3:
dev_priv->rps.cz_freq = 333;
dev_priv->mem_freq = 2000;
break;
case 4:
dev_priv->rps.cz_freq = 320;
dev_priv->mem_freq = 1600;
break;
case 5:
dev_priv->rps.cz_freq = 400;
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
4054,6 → 5269,12
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
 
WARN_ONCE((dev_priv->rps.max_freq |
dev_priv->rps.efficient_freq |
dev_priv->rps.rp1_freq |
dev_priv->rps.min_freq) & 1,
"Odd GPU freq values\n");
 
/* Preserve min/max settings in case of re-init */
if (dev_priv->rps.max_freq_softlimit == 0)
dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4111,8 → 5332,6
/* For now we assume BIOS is allocating and populating the PCBR */
pcbr = I915_READ(VLV_PCBR);
 
DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
 
/* 3: Enable RC6 */
if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
(pcbr >> VLV_PCBR_ADDR_SHIFT))
4142,7 → 5361,10
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4156,8 → 5378,6
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen8_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
4222,7 → 5442,10
 
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
/* RPS code assumes GPLL is used */
WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4236,8 → 5459,6
 
valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
gen6_enable_rps_interrupts(dev);
 
gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
 
4984,6 → 6205,20
valleyview_cleanup_gt_powersave(dev);
}
 
static void gen6_suspend_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
// flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
/*
* TODO: disable RPS interrupts on GEN9+ too once RPS support
* is added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_disable_rps_interrupts(dev);
}
 
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
4996,13 → 6231,11
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(intel_irqs_enabled(dev_priv));
if (INTEL_INFO(dev)->gen < 6)
return;
 
// flush_delayed_work(&dev_priv->rps.delayed_resume_work);
gen6_suspend_rps(dev);
 
cancel_work_sync(&dev_priv->rps.work);
 
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
}
5011,9 → 6244,6
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Interrupts should be disabled already to avoid re-arming. */
WARN_ON(intel_irqs_enabled(dev_priv));
 
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
5021,12 → 6251,15
intel_suspend_gt_powersave(dev);
 
mutex_lock(&dev_priv->rps.hw_lock);
if (IS_CHERRYVIEW(dev))
if (INTEL_INFO(dev)->gen >= 9)
gen9_disable_rps(dev);
else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
 
dev_priv->rps.enabled = false;
mutex_unlock(&dev_priv->rps.hw_lock);
}
5041,10 → 6274,19
 
mutex_lock(&dev_priv->rps.hw_lock);
 
/*
* TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
* added for it.
*/
if (INTEL_INFO(dev)->gen < 9)
gen6_reset_rps_interrupts(dev);
 
if (IS_CHERRYVIEW(dev)) {
cherryview_enable_rps(dev);
} else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
__gen6_update_ring_freq(dev);
5053,6 → 6295,10
__gen6_update_ring_freq(dev);
}
dev_priv->rps.enabled = true;
 
if (INTEL_INFO(dev)->gen < 9)
gen6_enable_rps_interrupts(dev);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
intel_runtime_pm_put(dev_priv);
5091,8 → 6337,11
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen < 6)
return;
 
gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
intel_enable_gt_powersave(dev);
}
 
static void ibx_init_clock_gating(struct drm_device *dev)
5112,7 → 6361,7
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
5227,7 → 6476,7
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
val = I915_READ(TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5239,7 → 6488,7
I915_WRITE(TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
5271,11 → 6520,6
I915_WRITE(_3D_CHICKEN,
_MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
/* WaSetupGtModeTdRowDispatch:snb */
if (IS_SNB_GT1(dev))
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
/* WaDisable_RenderCache_OperationalFlush:snb */
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
5288,7 → 6532,7
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN6_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
ilk_init_lp_watermarks(dev);
 
5407,7 → 6651,7
}
}
 
static void gen8_init_clock_gating(struct drm_device *dev)
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
5416,41 → 6660,6
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
/* FIXME(BDW): Check all the w/a, some might only apply to
* pre-production hw. */
 
/* WaDisablePartialInstShootdown:bdw */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
 
/* WaDisableThreadStallDopClockGating:bdw */
/* FIXME: Unclear whether we really need this on production bdw. */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 
/*
* This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
* pre-production hardware
*/
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
 
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
 
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
 
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
 
/* WaDisableDopClockGating:bdw May not be needed for production */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
/* WaSwitchSolVfFArbitrationPriority:bdw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
5459,20 → 6668,12
I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
 
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
for_each_pipe(pipe) {
for_each_pipe(dev_priv, pipe) {
I915_WRITE(CHICKEN_PIPESL_1(pipe),
I915_READ(CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
}
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
I915_WRITE(HDC_CHICKEN0,
I915_READ(HDC_CHICKEN0) |
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
 
/* WaVSRefCountFullforceMissDisable:bdw */
/* WaDSRefCountFullforceMissDisable:bdw */
I915_WRITE(GEN7_FF_THREAD_MODE,
5479,17 → 6680,6
I915_READ(GEN7_FF_THREAD_MODE) &
~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
 
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
 
5497,9 → 6687,7
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/* Wa4x4STCOptimizationDisable:bdw */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
lpt_init_clock_gating(dev);
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
5542,7 → 6730,7
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5639,7 → 6827,7
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
I915_WRITE(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
_MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
5655,25 → 6843,7
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 6) & 3) {
case 0:
case 1:
dev_priv->mem_freq = 800;
break;
case 2:
dev_priv->mem_freq = 1066;
break;
case 3:
dev_priv->mem_freq = 1333;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull:vlv */
5748,48 → 6918,11
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
mutex_unlock(&dev_priv->rps.hw_lock);
switch ((val >> 2) & 0x7) {
case 0:
case 1:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
dev_priv->mem_freq = 1600;
break;
case 2:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
dev_priv->mem_freq = 1600;
break;
case 3:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
dev_priv->mem_freq = 2000;
break;
case 4:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
dev_priv->mem_freq = 1600;
break;
case 5:
dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
dev_priv->mem_freq = 1600;
break;
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
 
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
 
/* WaDisablePartialInstShootdown:chv */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
 
/* WaDisableThreadStallDopClockGating:chv */
I915_WRITE(GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
 
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
5807,24 → 6940,6
/* WaDisableSDEUnitClockGating:chv */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
I915_WRITE(HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
 
/* WaDisableGunitClockGating:chv (pre-production hw) */
I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
GINT_DIS);
 
/* WaDisableFfDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
_MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
 
/* WaDisableDopClockGating:chv (pre-production hw) */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
5907,6 → 7022,9
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
 
static void i85x_init_clock_gating(struct drm_device *dev)
5918,6 → 7036,9
/* interrupts should cause a wake up from C3 */
I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
_MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
 
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
 
static void i830_init_clock_gating(struct drm_device *dev)
5925,6 → 7046,10
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
 
void intel_init_clock_gating(struct drm_device *dev)
5940,870 → 7065,22
lpt_suspend_hw(dev);
}
 
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
((power_well) = &(power_domains)->power_wells[i]); \
i++) \
if ((power_well)->domains & (domain_mask))
 
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
for (i = (power_domains)->power_well_count - 1; \
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
i--) \
if ((power_well)->domains & (domain_mask))
 
/**
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
 
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
 
if (dev_priv->pm.suspended)
return false;
 
power_domains = &dev_priv->power_domains;
 
is_enabled = true;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
 
if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
 
return is_enabled;
}
 
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
bool ret;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
ret = intel_display_power_enabled_unlocked(dev_priv, domain);
mutex_unlock(&power_domains->lock);
 
return ret;
}
 
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So here we touch the VGA MSR register, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
if (IS_BROADWELL(dev))
gen8_irq_power_well_post_enable(dev_priv);
}
 
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
bool is_enabled, enable_requested;
uint32_t tmp;
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
 
if (enable) {
if (!enable_requested)
I915_WRITE(HSW_PWR_WELL_DRIVER,
HSW_PWR_WELL_ENABLE_REQUEST);
 
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
 
hsw_power_well_post_enable(dev_priv);
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
}
 
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
 
/*
* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now.
*/
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
 
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, true);
}
 
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, false);
}
 
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
}
 
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return true;
}
 
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
PUNIT_PWRGT_PWR_GATE(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
 
if (COND)
goto out;
 
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
#undef COND
 
out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
 
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, true);
}
 
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, false);
}
 
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int power_well_id = power_well->data;
bool enabled = false;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
state != PUNIT_PWRGT_PWR_GATE(power_well_id));
if (state == ctrl)
enabled = true;
 
/*
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
return enabled;
}
 
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
vlv_set_power_well(dev_priv, power_well, true);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
if (dev_priv->power_domains.initializing)
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
return;
 
intel_hpd_init(dev_priv->dev);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
 
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
vlv_set_power_well(dev_priv, power_well, true);
 
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
* b. The other bits such as sfr settings / modesel may all
* be set to 0.
*
* This should only be done on init and resume from S3 with
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
 
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
struct drm_device *dev = dev_priv->dev;
enum pipe pipe;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
for_each_pipe(pipe)
assert_pll_disabled(dev_priv, pipe);
 
/* Assert common reset */
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
 
if (power_well->always_on || !i915.disable_power_well) {
if (!enabled)
goto mismatch;
 
return;
}
 
if (enabled != (power_well->count > 0))
goto mismatch;
 
return;
 
mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}
 
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
intel_runtime_pm_get(dev_priv);
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
 
check_power_well_state(dev_priv, power_well);
}
 
power_domains->domain_use_count[domain]++;
 
mutex_unlock(&power_domains->lock);
}
 
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
 
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
 
check_power_well_state(dev_priv, power_well);
}
 
mutex_unlock(&power_domains->lock);
 
intel_runtime_pm_put(dev_priv);
}
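
intel_display_power_get() and intel_display_power_put() are a reference-counted pair: the first get on a domain powers up every well whose mask covers that domain (and takes a runtime-PM reference), and the last put lets the wells be powered down again. A minimal usage sketch, assuming a hypothetical caller; the domain/register pair chosen here is illustrative and not taken from this patch:

/* Hedged sketch: guard a register read with a display power domain.
 * The domain and register are examples only. */
static u32 example_read_pfit(struct drm_i915_private *dev_priv)
{
	u32 val;

	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A_PANEL_FITTER);
	val = I915_READ(PFIT_CONTROL);	/* wells covering the domain are up here */
	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A_PANEL_FITTER);

	return val;
}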
 
static struct i915_power_domains *hsw_pwr;
 
/* Display audio driver power well request */
int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
/*
* Private interface for the audio driver to get CDCLK in kHz.
*
* Caller must request power well using i915_request_power_well() prior to
* making the call.
*/
int i915_get_cdclk_freq(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
 
return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
 
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
HSW_ALWAYS_ON_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
 
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
.disable = i9xx_always_on_power_well_noop,
.is_enabled = i9xx_always_on_power_well_enabled,
};
 
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
};
 
static const struct i915_power_well_ops hsw_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = hsw_power_well_enable,
.disable = hsw_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = HSW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = BDW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
static const struct i915_power_well_ops vlv_display_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_display_power_well_enable,
.disable = vlv_display_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_dpio_cmn_power_well_enable,
.disable = vlv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
.disable = vlv_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
 
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->data == power_well_id)
return power_well;
}
 
return NULL;
}
 
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
 
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
mutex_init(&power_domains->lock);
 
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
 
return 0;
}
 
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
hsw_pwr = NULL;
}
 
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
power_well->ops->sync_hw(dev_priv, power_well);
power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
power_well);
}
mutex_unlock(&power_domains->lock);
}
 
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *disp2d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
 
/* nothing to do if common lane is already off */
if (!cmn->ops->is_enabled(dev_priv, cmn))
return;
 
/* If the display might already be active, skip this */
if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
I915_READ(DPIO_CTL) & DPIO_CMNRST)
return;
 
DRM_DEBUG_KMS("toggling display PHY side reset\n");
 
/* cmnlane needs DPLL registers */
disp2d->ops->enable(dev_priv, disp2d);
 
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
* Need to assert and de-assert PHY SB reset by gating the
* common lane power, then un-gating it.
* Simply ungating isn't enough to reset the PHY enough to get
* ports and lanes running.
*/
cmn->ops->disable(dev_priv, cmn);
}
 
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
power_domains->initializing = true;
 
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
}
 
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
power_domains->initializing = false;
}
 
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_get(dev_priv);
}
 
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
}
 
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
// pm_runtime_get_sync(device);
WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
 
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
// pm_runtime_get_noresume(device);
}
 
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
}
 
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
 
/*
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
* requirement.
*/
if (!intel_enable_rc6(dev)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
return;
}
 
 
}
 
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
if (!intel_enable_rc6(dev))
return;
 
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_FBC(dev)) {
if (INTEL_INFO(dev)->gen >= 7) {
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = gen7_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (INTEL_INFO(dev)->gen >= 5) {
} else if (INTEL_INFO(dev_priv)->gen >= 5) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
} else if (IS_GM45(dev_priv)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
6815,8 → 7092,17
/* This value was pulled out of someone's hat */
I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
}
 
dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
intel_init_fbc(dev_priv);
 
/* For cxsr */
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
6824,7 → 7110,13
i915_ironlake_get_mem_freq(dev);
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
 
dev_priv->display.init_clock_gating = gen9_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
 
if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6847,13 → 7139,15
else if (IS_HASWELL(dev))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (INTEL_INFO(dev)->gen == 8)
dev_priv->display.init_clock_gating = gen8_init_clock_gating;
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_wm = cherryview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
6903,7 → 7197,7
}
}
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
6913,6 → 7207,7
}
 
I915_WRITE(GEN6_PCODE_DATA, *val);
I915_WRITE(GEN6_PCODE_DATA1, 0);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6927,7 → 7222,7
return 0;
}
 
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
6950,98 → 7245,66
return 0;
}
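
Both pcode helpers assume the caller already holds dev_priv->rps.hw_lock, which is what the WARN_ON at the top of each function checks. A hedged sketch of a typical read under the lock follows; GEN6_READ_OC_PARAMS is only an example mailbox opcode and the helper name is hypothetical:

/* Illustrative only: one locked round-trip through the SNB+ pcode mailbox. */
static u32 example_pcode_read(struct drm_i915_private *dev_priv)
{
	u32 val = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &val))
		DRM_DEBUG_DRIVER("pcode read failed\n");
	mutex_unlock(&dev_priv->rps.hw_lock);

	return val;
}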
 
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
int div;
 
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
div = 10;
break;
case 1066:
div = 12;
break;
case 1333:
div = 16;
break;
switch (czclk_freq) {
case 200:
return 10;
case 267:
return 12;
case 320:
case 333:
return 16;
case 400:
return 20;
default:
return -1;
}
}
 
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
 
div = vlv_gpu_freq_div(czclk_freq);
if (div < 0)
return div;
 
return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
 
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
 
/* 4 x czclk */
switch (dev_priv->mem_freq) {
case 800:
mul = 10;
break;
case 1066:
mul = 12;
break;
case 1333:
mul = 16;
break;
default:
return -1;
}
mul = vlv_gpu_freq_div(czclk_freq);
if (mul < 0)
return mul;
 
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
 
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div, freq;
int div, czclk_freq = dev_priv->rps.cz_freq;
 
switch (dev_priv->rps.cz_freq) {
case 200:
div = 5;
break;
case 267:
div = 6;
break;
case 320:
case 333:
case 400:
div = 8;
break;
default:
return -1;
}
div = vlv_gpu_freq_div(czclk_freq) / 2;
if (div < 0)
return div;
 
freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
 
return freq;
return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
 
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul, opcode;
int mul, czclk_freq = dev_priv->rps.cz_freq;
 
switch (dev_priv->rps.cz_freq) {
case 200:
mul = 5;
break;
case 267:
mul = 6;
break;
case 320:
case 333:
case 400:
mul = 8;
break;
default:
return -1;
}
mul = vlv_gpu_freq_div(czclk_freq) / 2;
if (mul < 0)
return mul;
 
opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
 
return opcode;
/* CHV needs even values */
return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
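
The conversion can be sanity-checked with a worked example. Assuming a Valleyview part with mem_freq = 1066 MHz, the derived czclk is DIV_ROUND_CLOSEST(1066, 4) = 267 MHz and the divider is 12, so punit opcode 0xc0 decodes to DIV_ROUND_CLOSEST(267 * (0xc0 + 6 - 0xbd), 12) = 200 MHz, and byt_freq_opcode(200) rounds back to 0xc0. A small host-side check, not driver code; the simplified DIV_ROUND_CLOSEST below only handles positive operands:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))	/* positive ints only */

int main(void)
{
	int mem_freq = 1066;				/* example DDR speed, MHz */
	int czclk = DIV_ROUND_CLOSEST(mem_freq, 4);	/* 267 */
	int div = 12;					/* vlv_gpu_freq_div(267) */
	int opcode = 0xc0;

	int mhz = DIV_ROUND_CLOSEST(czclk * (opcode + 6 - 0xbd), div);
	int back = DIV_ROUND_CLOSEST(div * mhz, czclk) + 0xbd - 6;

	printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n", opcode, mhz, back);
	return 0;
}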
 
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7078,5 → 7341,4
intel_gen6_powersave_work);
 
dev_priv->pm.suspended = false;
dev_priv->pm._irqs_disabled = false;
}
/drivers/video/drm/i915/intel_psr.c
0,0 → 1,481
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
/**
* DOC: Panel Self Refresh (PSR/SRD)
*
* Since Haswell the display controller supports Panel Self-Refresh on display
* panels which have a remote frame buffer (RFB) implemented according to the PSR
* spec in eDP 1.3. PSR allows the display to go to lower standby states
* when the system is idle but the display is on, as it completely eliminates
* display refresh requests to DDR memory as long as the frame buffer for that
* display is unchanged.
*
* Panel Self Refresh must be supported by both Hardware (source) and
* Panel (sink).
*
* PSR saves power by caching the framebuffer in the panel RFB, which allows us
* to power down the link and memory controller. For DSI panels the same idea
* is called "manual mode".
*
* The implementation uses the hardware-based PSR support which automatically
* enters/exits self-refresh mode. The hardware takes care of sending the
* required DP aux message and could even retrain the link (that part isn't
* enabled yet though). The hardware also keeps track of any frontbuffer
* changes to know when to exit self-refresh mode again. Unfortunately that
* part doesn't work too well, which is why the i915 PSR support uses the
* software frontbuffer tracking to make sure it doesn't miss a screen
* update. For this integration intel_psr_invalidate() and intel_psr_flush()
* get called by the frontbuffer tracking code. Note that because of locking
* issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe.
*/
 
#include <drm/drmP.h>
 
#include "intel_drv.h"
#include "i915_drv.h"
 
static bool is_edp_psr(struct intel_dp *intel_dp)
{
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
 
bool intel_psr_is_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_PSR(dev))
return false;
 
return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}
 
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;
 
/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP being updated before programming the video DIP data buffer
registers for the DIP being updated. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);
 
for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}
 
I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}
 
static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
 
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
 
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
int i;
 
BUILD_BUG_ON(sizeof(aux_msg) > 20);
 
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
 
/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
 
static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;
 
if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;
 
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
 
I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}
 
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
 
dev_priv->psr.source_ok = false;
 
if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
 
if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}
 
/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;
 
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}
 
if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}
 
out:
dev_priv->psr.source_ok = true;
return true;
}
 
static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
 
/* Enable/Re-enable PSR on the host */
intel_psr_enable_source(intel_dp);
 
dev_priv->psr.active = true;
}
 
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}
 
if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}
 
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
 
if (!intel_psr_match_conditions(intel_dp))
goto unlock;
 
dev_priv->psr.busy_frontbuffer_bits = 0;
 
intel_psr_setup_vsc(intel_dp);
 
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
/* Enable PSR on the panel */
intel_psr_enable_sink(intel_dp);
 
dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
*
* This function needs to be called before disabling pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}
 
dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
 
cancel_delayed_work_sync(&dev_priv->psr.work);
}
 
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
/* We have to make sure PSR is ready for re-enable,
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}
 
mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;
 
if (!intel_dp)
goto unlock;
 
/*
* The delayed work can race with an invalidate, hence we need to
* recheck. Since psr_flush first clears this and then reschedules, we
* won't ever miss a flush when bailing out here.
*/
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;
 
intel_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
 
static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));
 
WARN_ON(!(val & EDP_PSR_ENABLE));
 
I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
 
dev_priv->psr.active = false;
}
 
}
 
/**
* intel_psr_invalidate - Invalidate PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
* time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
* disabled if the frontbuffer mask contains a buffer relevant to PSR.
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
 
intel_psr_exit(dev);
 
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
 
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}
 
/**
* intel_psr_flush - Flush PSR
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
* time frontbuffer rendering has completed and flushed out to memory. PSR
* can be enabled again if no other frontbuffer relevant to PSR is dirty.
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;
 
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}
 
crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
 
/*
* On Haswell, sprite plane updates don't result in a PSR invalidating
* signal in the hardware, which means we need to manually fake this in
* software for all flushes, not just when we've seen a preceding
* invalidation through frontbuffer rendering.
*/
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);
 
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}
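
The invalidate/flush pair is meant to be driven by the frontbuffer tracking code: invalidate before rendering dirties a buffer, flush once the rendering has landed in memory so PSR may be re-armed. A hedged sketch of how a caller would bracket a CPU blit; the helper is hypothetical, and real callers go through the intel_frontbuffer_* wrappers rather than calling the PSR hooks directly:

/* Illustrative only: bracket a CPU blit to the primary plane of pipe A. */
static void example_cpu_blit(struct drm_device *dev)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_A);

	intel_psr_invalidate(dev, frontbuffer_bits);	/* force PSR exit while drawing */
	/* ... write pixels through the CPU/GTT mapping ... */
	intel_psr_flush(dev, frontbuffer_bits);		/* allow delayed PSR re-enable */
}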
 
/**
* intel_psr_init - Init basic PSR work and mutex.
* @dev: DRM device
*
* This function is called only once at driver load to initialize basic
* PSR stuff.
*/
void intel_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}
/drivers/video/drm/i915/intel_renderstate.h
24,17 → 24,12
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H
 
#include <linux/types.h>
#include "i915_drv.h"
 
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
 
extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
 
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
/drivers/video/drm/i915/intel_renderstate_gen8.c
1,18 → 1,136
#include "intel_renderstate.h"
 
static const u32 gen8_null_state_relocs[] = {
0x00000048,
0x00000050,
0x00000060,
0x000003ec,
0x00000798,
0x000007a4,
0x000007ac,
0x000007bc,
-1,
};
 
static const u32 gen8_null_state_batch[] = {
0x7a000004,
0x01000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x69040000,
0x61020001,
0x78140000,
0x04000000,
0x7820000a,
0x00000000,
0x00000000,
0x80000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x00000000,
0x02001808,
0x781f0002,
0x00000000,
0x00000000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x00000000,
0x00010000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000800,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781d0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78500003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x780c0000,
0x00000000,
0x78520003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78300000,
0x08010040,
0x78310000,
0x1e000000,
0x78320000,
0x1e000000,
0x78330000,
0x1e000000,
0x79190002,
0x00000000,
0x00000000,
0x00000000,
0x791a0002,
0x00000000,
0x00000000,
0x00000000,
0x791b0002,
0x00000000,
0x00000000,
0x00000000,
0x79120000,
0x00000000,
0x79130000,
23,48 → 141,435
0x00000000,
0x79160000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781a0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78170009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78490001,
0x00000000,
0x00000000,
0x784a0000,
0x00000000,
0x784b0000,
0x00000004,
0x79170101,
0x00000000,
0x00000080,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x20000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x40000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x60000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x6101000e,
0x00000001,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0xfffff001,
0x00001001,
0xfffff001,
0x00001001,
0x78230000,
0x000006e0,
0x78210000,
0x00000700,
0x78300000,
0x08010040,
0x78330000,
0x08000000,
0x78310000,
0x08000000,
0x78320000,
0x08000000,
0x78240000,
0x00000641,
0x780e0000,
0x00000601,
0x00000001,
0x00001001,
0x61020001,
0x00000000,
0x00000000,
0x79000002,
0x00000000,
0x00000000,
0x00000000,
0x78050006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x40000000,
0x00000000,
0x00000000,
0x79040002,
0x80000000,
0x00000000,
0x00000000,
0x79040002,
0xc0000000,
0x00000000,
0x00000000,
0x79080001,
0x00000000,
0x00000000,
0x790a0001,
0x00000000,
0x00000000,
0x78060003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x79110000,
0x00000000,
0x780d0000,
0x00000000,
0x78180000,
0x00000001,
0x78520003,
0x79060000,
0x00000000,
0x7907001f,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
75,7 → 580,6
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
84,15 → 588,11
0x00000000,
0x00000000,
0x00000000,
0x78270000,
0x00000000,
0x782c0000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
99,11 → 599,11
0x00000000,
0x00000000,
0x00000000,
0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
113,12 → 613,10
0x00000000,
0x00000000,
0x00000000,
0x78290000,
0x00000000,
0x782e0000,
0x00000000,
0x781a0009,
0x00000000,
0x790c000f,
0x00000000,
0x00000000,
0x00000000,
128,7 → 626,6
0x00000000,
0x00000000,
0x00000000,
0x781d0007,
0x00000000,
0x00000000,
0x00000000,
136,153 → 633,153
0x00000000,
0x00000000,
0x00000000,
0x780a0003,
0x00000000,
0x78280000,
0x00000000,
0x782d0000,
0x00000000,
0x78260000,
0x00000000,
0x782b0000,
0x78080083,
0x00004000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x04004000,
0x00000000,
0x00000000,
0x00000000,
0x08004000,
0x00000000,
0x00000000,
0x00000000,
0x0c004000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x10004000,
0x00000000,
0x00000000,
0x00000000,
0x14004000,
0x00000000,
0x00000000,
0x00000000,
0x18004000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x1c004000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x20004000,
0x00000000,
0x00000000,
0x781f0002,
0x30400820,
0x00000000,
0x24004000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x28004000,
0x00000000,
0x00000000,
0x00000000,
0x2c004000,
0x00000000,
0x00000000,
0x00000000,
0x30004000,
0x00000000,
0x00000000,
0x78500003,
0x00210000,
0x00000000,
0x34004000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x38004000,
0x00000000,
0x00000000,
0x782a0000,
0x00000480,
0x782f0000,
0x00000540,
0x78140000,
0x00000800,
0x78170009,
0x00000000,
0x3c004000,
0x00000000,
0x00000000,
0x00000000,
0x40004000,
0x00000000,
0x00000000,
0x00000000,
0x44004000,
0x00000000,
0x00000000,
0x00000000,
0x7820000a,
0x00000580,
0x48004000,
0x00000000,
0x08080000,
0x00000000,
0x00000000,
0x1f000002,
0x00060000,
0x4c004000,
0x00000000,
0x00000000,
0x00000000,
0x50004000,
0x00000000,
0x784d0000,
0x40000000,
0x784f0000,
0x80000100,
0x780f0000,
0x00000740,
0x78050006,
0x00000000,
0x00000000,
0x54004000,
0x00000000,
0x00000000,
0x00000000,
0x58004000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x5c004000,
0x00000000,
0x00000000,
0x00000000,
0x78060003,
0x60004000,
0x00000000,
0x00000000,
0x00000000,
0x64004000,
0x00000000,
0x78040001,
0x00000000,
0x00000001,
0x79000002,
0xffffffff,
0x00000000,
0x68004000,
0x00000000,
0x78080003,
0x00006000,
0x000005e0, /* reloc */
0x00000000,
0x00000000,
0x78090005,
0x6c004000,
0x00000000,
0x00000000,
0x00000000,
0x70004000,
0x00000000,
0x00000000,
0x00000000,
0x74004000,
0x00000000,
0x00000000,
0x00000000,
0x78004000,
0x00000000,
0x00000000,
0x00000000,
0x7c004000,
0x00000000,
0x00000000,
0x00000000,
0x80004000,
0x00000000,
0x00000000,
0x00000000,
0x78090043,
0x02000000,
0x22220000,
0x02f60000,
0x11230000,
0x02850004,
0x11230000,
0x784b0000,
0x0000000f,
0x78490001,
0x00000000,
0x00000000,
0x7b000005,
0x00000000,
0x00000003,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
297,8 → 794,6
0x00000000,
0x00000000,
0x00000000,
0x000004c0, /* state start */
0x00000500,
0x00000000,
0x00000000,
0x00000000,
345,52 → 840,61
0x00000000,
0x00000000,
0x00000000,
0x680b0001,
0x78260000,
0x00000000,
0x78270000,
0x00000000,
0x78280000,
0x00000000,
0x00000092,
0x78290000,
0x00000000,
0x782a0000,
0x00000000,
0x780e0000,
0x00000dc1,
0x78240000,
0x00000e01,
0x784f0000,
0x80000100,
0x784d0000,
0x40000000,
0x782b0000,
0x00000000,
0x782c0000,
0x00000000,
0x782d0000,
0x00000000,
0x782e0000,
0x00000000,
0x782f0000,
0x00000000,
0x780f0000,
0x00000000,
0x78230000,
0x00000e60,
0x78210000,
0x00000e80,
0x7b000005,
0x00000004,
0x00000001,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x0060005a,
0x21403ae8,
0x3a0000c0,
0x008d0040,
0x0060005a,
0x21603ae8,
0x3a0000c0,
0x008d0080,
0x0060005a,
0x21803ae8,
0x3a0000d0,
0x008d0040,
0x0060005a,
0x21a03ae8,
0x3a0000d0,
0x008d0080,
0x02800031,
0x2e0022e8,
0x0e000140,
0x08840001,
0x05800031,
0x200022e0,
0x0e000e00,
0x90031000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state start */
0x00000000,
0x3f800000,
0x3f800000,
0x3f800000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
410,38 → 914,6
0x00000000,
0x00000000,
0x00000000,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x06200000,
0x00000002,
0x00000000,
0x00000000,
0x00000000,
449,8 → 921,6
0x00000000,
0x00000000,
0x00000000,
0xf99a130c,
0x799a130c,
0x00000000,
0x00000000,
0x00000000,
466,14 → 936,22
0x00000000,
0x00000000,
0x00000000,
0x3f800000,
0x00000000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
 
/drivers/video/drm/i915/intel_renderstate_gen9.c
0,0 → 1,974
#include "intel_renderstate.h"
 
static const u32 gen9_null_state_relocs[] = {
0x000007a8,
0x000007b4,
0x000007bc,
0x000007cc,
-1,
};
 
static const u32 gen9_null_state_batch[] = {
0x7a000004,
0x01000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x69040300,
0x78140000,
0x04000000,
0x7820000a,
0x00000000,
0x00000000,
0x80000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78130002,
0x00000000,
0x00000000,
0x02001808,
0x781f0004,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78510009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78100007,
0x00000000,
0x00000000,
0x00010000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781b0007,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000800,
0x00000000,
0x78110008,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781e0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781d0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78120002,
0x00000000,
0x00000000,
0x00000000,
0x78500003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781c0002,
0x00000000,
0x00000000,
0x00000000,
0x780c0000,
0x00000000,
0x78520003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78300000,
0x08010040,
0x78310000,
0x1e000000,
0x78320000,
0x1e000000,
0x78330000,
0x1e000000,
0x79190002,
0x00000000,
0x00000000,
0x00000000,
0x791a0002,
0x00000000,
0x00000000,
0x00000000,
0x791b0002,
0x00000000,
0x00000000,
0x00000000,
0x79120000,
0x00000000,
0x79130000,
0x00000000,
0x79140000,
0x00000000,
0x79150000,
0x00000000,
0x79160000,
0x00000000,
0x78150009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78190009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x781a0009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78160009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78170009,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78490001,
0x00000000,
0x00000000,
0x784a0000,
0x00000000,
0x784b0000,
0x00000004,
0x79170101,
0x00000000,
0x00000080,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x20000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x40000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79180006,
0x60000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x61010011,
0x00000001, /* reloc */
0x00000000,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00000001,
0x00000000,
0x00000001, /* reloc */
0x00000000,
0x00001001,
0x00001001,
0x00000001,
0x00001001,
0x00000000,
0x00000000,
0x00000000,
0x61020001,
0x00000000,
0x00000000,
0x79000002,
0x00000000,
0x00000000,
0x00000000,
0x78050006,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x00000000,
0x00000000,
0x00000000,
0x79040002,
0x40000000,
0x00000000,
0x00000000,
0x79040002,
0x80000000,
0x00000000,
0x00000000,
0x79040002,
0xc0000000,
0x00000000,
0x00000000,
0x79080001,
0x00000000,
0x00000000,
0x790a0001,
0x00000000,
0x00000000,
0x78060003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78070003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78040001,
0x00000000,
0x00000000,
0x79110000,
0x00000000,
0x780d0000,
0x00000000,
0x79060000,
0x00000000,
0x7907001f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x7902000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x790c000f,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x780a0003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78080083,
0x00004000,
0x00000000,
0x00000000,
0x00000000,
0x04004000,
0x00000000,
0x00000000,
0x00000000,
0x08004000,
0x00000000,
0x00000000,
0x00000000,
0x0c004000,
0x00000000,
0x00000000,
0x00000000,
0x10004000,
0x00000000,
0x00000000,
0x00000000,
0x14004000,
0x00000000,
0x00000000,
0x00000000,
0x18004000,
0x00000000,
0x00000000,
0x00000000,
0x1c004000,
0x00000000,
0x00000000,
0x00000000,
0x20004000,
0x00000000,
0x00000000,
0x00000000,
0x24004000,
0x00000000,
0x00000000,
0x00000000,
0x28004000,
0x00000000,
0x00000000,
0x00000000,
0x2c004000,
0x00000000,
0x00000000,
0x00000000,
0x30004000,
0x00000000,
0x00000000,
0x00000000,
0x34004000,
0x00000000,
0x00000000,
0x00000000,
0x38004000,
0x00000000,
0x00000000,
0x00000000,
0x3c004000,
0x00000000,
0x00000000,
0x00000000,
0x40004000,
0x00000000,
0x00000000,
0x00000000,
0x44004000,
0x00000000,
0x00000000,
0x00000000,
0x48004000,
0x00000000,
0x00000000,
0x00000000,
0x4c004000,
0x00000000,
0x00000000,
0x00000000,
0x50004000,
0x00000000,
0x00000000,
0x00000000,
0x54004000,
0x00000000,
0x00000000,
0x00000000,
0x58004000,
0x00000000,
0x00000000,
0x00000000,
0x5c004000,
0x00000000,
0x00000000,
0x00000000,
0x60004000,
0x00000000,
0x00000000,
0x00000000,
0x64004000,
0x00000000,
0x00000000,
0x00000000,
0x68004000,
0x00000000,
0x00000000,
0x00000000,
0x6c004000,
0x00000000,
0x00000000,
0x00000000,
0x70004000,
0x00000000,
0x00000000,
0x00000000,
0x74004000,
0x00000000,
0x00000000,
0x00000000,
0x78004000,
0x00000000,
0x00000000,
0x00000000,
0x7c004000,
0x00000000,
0x00000000,
0x00000000,
0x80004000,
0x00000000,
0x00000000,
0x00000000,
0x78090043,
0x02000000,
0x22220000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x78550003,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x680b0001,
0x780e0000,
0x00000e01,
0x78240000,
0x00000e41,
0x784f0000,
0x80000100,
0x784d0000,
0x40000000,
0x782b0000,
0x00000000,
0x782c0000,
0x00000000,
0x782d0000,
0x00000000,
0x782e0000,
0x00000000,
0x782f0000,
0x00000000,
0x780f0000,
0x00000000,
0x78230000,
0x00000ea0,
0x78210000,
0x00000ec0,
0x78260000,
0x00000000,
0x78270000,
0x00000000,
0x78280000,
0x00000000,
0x78290000,
0x00000000,
0x782a0000,
0x00000000,
0x7b000005,
0x00000004,
0x00000001,
0x00000000,
0x00000001,
0x00000000,
0x00000000,
0x05000000, /* cmds end */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state start */
0x00000000,
0x3f800000,
0x3f800000,
0x3f800000,
0x3f800000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* state end */
};
 
RO_RENDERSTATE(9);
/drivers/video/drm/i915/intel_ringbuffer.c
33,14 → 33,24
#include "i915_trace.h"
#include "intel_drv.h"
 
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
static inline int __ring_space(int head, int tail, int size)
if (!dev)
return false;
 
if (i915.enable_execlists) {
struct intel_context *dctx = ring->default_context;
struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
 
return ringbuf->obj;
} else
return ring->buffer && ring->buffer->obj;
}
 
int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
48,12 → 58,13
return space;
}
 
static inline int ring_space(struct intel_ringbuffer *ringbuf)
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
return __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
}
 
static bool intel_ring_stopped(struct intel_engine_cs *ring)
bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
351,6 → 362,7
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
357,6 → 369,8
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
 
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
433,7 → 447,14
return ret;
}
 
return gen8_emit_pipe_control(ring, flags, scratch_addr);
ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
if (ret)
return ret;
 
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
 
return 0;
}
 
static void ring_write_tail(struct intel_engine_cs *ring,
476,8 → 497,13
 
if (!IS_GEN2(ring->dev)) {
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
/* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
return false;
}
}
540,6 → 566,14
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(ring))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
ring->name, I915_READ_HEAD(ring));
I915_WRITE_HEAD(ring, 0);
(void)I915_READ_HEAD(ring);
 
I915_WRITE_CTL(ring,
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
558,10 → 592,9
goto out;
}
 
 
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;
 
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
572,9 → 605,26
return ret;
}
 
static int
init_pipe_control(struct intel_engine_cs *ring)
void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
 
if (ring->scratch.obj == NULL)
return;
 
if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}
 
drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
}
 
int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;
 
if (ring->scratch.obj)
596,7 → 646,7
goto err_unref;
 
ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW|0x100);
ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
614,6 → 664,170
return ret;
}
 
static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
 
if (WARN_ON(w->count == 0))
return 0;
 
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
 
ret = intel_ring_begin(ring, (w->count * 2 + 2));
if (ret)
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_ring_emit(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
}
intel_ring_emit(ring, MI_NOOP);
 
intel_ring_advance(ring);
 
ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
 
DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
 
return 0;
}
 
static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
const u32 idx = dev_priv->workarounds.count;
 
if (WARN_ON(idx >= I915_MAX_WA_REGS))
return -ENOSPC;
 
dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;
 
dev_priv->workarounds.count++;
 
return 0;
}
 
#define WA_REG(addr, mask, val) { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
}
 
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
 
#define WA_CLR_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
 
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))
 
#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
 
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
 
/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);
 
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
 
/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
 
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
 
return 0;
}
 
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* WaDisablePartialInstShootdown:chv */
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);
 
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
/* WaForceEnableNonCoherent:chv */
/* WaHdcDisableFetchWhenMasked:chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
 
return 0;
}
 
int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(ring->id != RCS);
 
dev_priv->workarounds.count = 0;
 
if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);
 
if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);
 
return 0;
}
 
static int init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
632,7 → 846,7
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
*/
if (INTEL_INFO(dev)->gen >= 6)
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
/* Required for the hardware to program scanline values for waiting */
648,7 → 862,7
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
669,7 → 883,7
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
return ret;
return init_workarounds_ring(ring);
}
 
static void render_ring_cleanup(struct intel_engine_cs *ring)
683,18 → 897,9
dev_priv->semaphore_obj = NULL;
}
 
if (ring->scratch.obj == NULL)
return;
 
if (INTEL_INFO(dev)->gen >= 5) {
// kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
intel_fini_pipe_control(ring);
}
 
drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
}
 
static int gen8_rcs_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
1015,7 → 1220,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1046,7 → 1251,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (!intel_irqs_enabled(dev_priv))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1083,7 → 1288,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (!intel_irqs_enabled(dev_priv))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1217,7 → 1422,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1260,7 → 1465,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1280,9 → 1485,6
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
1298,7 → 1500,7
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
1449,7 → 1651,7
if (obj == NULL)
return;
 
// kunmap(sg_page(obj->pages->sgl));
kunmap(sg_page(obj->pages->sgl));
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
1497,7 → 1699,7
}
 
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
1523,27 → 1725,51
return 0;
}
 
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;
 
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
 
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret;
 
if (ringbuf->obj)
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
return ret;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}
 
ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}
 
return 0;
}
 
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
 
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_gem_object *obj;
 
obj = NULL;
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ringbuf->size);
1555,30 → 1781,9
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
 
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
ringbuf->obj = obj;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto err_unpin;
 
ringbuf->virtual_start =
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ringbuf->size);
if (ringbuf->virtual_start == NULL) {
ret = -EINVAL;
goto err_unpin;
}
 
ringbuf->obj = obj;
return 0;
 
err_unpin:
i915_gem_object_ggtt_unpin(obj);
err_unref:
drm_gem_object_unreference(&obj->base);
return ret;
}
 
static int intel_init_ring_buffer(struct drm_device *dev,
1597,7 → 1802,9
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->execlist_queue);
ringbuf->size = 32 * PAGE_SIZE;
ringbuf->ring = ring;
memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
init_waitqueue_head(&ring->irq_queue);
1613,12 → 1820,23
goto error;
}
 
if (ringbuf->obj == NULL) {
ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
ring->name, ret);
goto error;
}
 
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
}
 
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
1645,15 → 1863,19
 
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = to_i915(ring->dev);
struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
 
if (!intel_ring_initialized(ring))
return;
 
dev_priv = to_i915(ring->dev);
ringbuf = ring->buffer;
 
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
intel_unpin_ringbuffer_obj(ringbuf);
intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
1680,13 → 1902,14
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
 
list_for_each_entry(request, &ring->request_list, list) {
if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
if (__intel_ring_space(request->tail, ringbuf->tail,
ringbuf->size) >= n) {
seqno = request->seqno;
break;
}
1703,7 → 1926,7
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
 
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
return 0;
}
 
1732,13 → 1955,12
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
}
 
 
msleep(1);
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1773,7 → 1995,7
iowrite32(MI_NOOP, virt++);
 
ringbuf->tail = 0;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
 
return 0;
}
1978,9 → 2200,7
u64 offset, u32 len,
unsigned flags)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
!(flags & I915_DISPATCH_SECURE);
bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
int ret;
 
ret = intel_ring_begin(ring, 4);
2009,8 → 2229,9
return ret;
 
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
2045,6 → 2266,7
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
 
2075,8 → 2297,12
}
intel_ring_advance(ring);
 
if (IS_GEN7(dev) && !invalidate && flush)
if (!invalidate && flush) {
if (IS_GEN7(dev))
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
else if (IS_BROADWELL(dev))
dev_priv->fbc.need_sw_cache_clean = true;
}
 
return 0;
}
2109,6 → 2335,8
dev_priv->semaphore_obj = obj;
}
}
 
ring->init_context = intel_ring_workarounds_emit;
ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq;
2218,93 → 2446,6
return intel_init_ring_buffer(dev, ring);
}
 
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
struct intel_ringbuffer *ringbuf = ring->buffer;
int ret;
 
if (ringbuf == NULL) {
ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
if (!ringbuf)
return -ENOMEM;
ring->buffer = ringbuf;
}
 
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
 
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
ret = -ENODEV;
goto err_ringbuf;
}
 
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
* gem_init ioctl returns with -ENODEV). Hence we do not need to set up
* the special gen5 functions. */
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
ring->flush = gen2_render_ring_flush;
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
} else {
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->irq_enable_mask = I915_USER_INTERRUPT;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
 
ringbuf->size = size;
ringbuf->effective_size = ringbuf->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
ringbuf->virtual_start = ioremap_wc(start, size);
if (ringbuf->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
ret = -ENOMEM;
goto err_ringbuf;
}
 
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_status_page(ring);
if (ret)
goto err_vstart;
}
 
return 0;
 
err_vstart:
iounmap(ringbuf->virtual_start);
err_ringbuf:
kfree(ringbuf);
ring->buffer = NULL;
return ret;
}
#endif
 
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/drivers/video/drm/i915/intel_ringbuffer.h
5,6 → 5,13
 
#define I915_CMD_HASH_ORDER 9
 
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
* to give some indication as to some of the magic values used in the various
* workarounds!
*/
#define CACHELINE_BYTES 64
 
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
90,6 → 97,15
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
 
struct intel_engine_cs *ring;
 
/*
* FIXME: This backpointer is an artifact of the history of how the
* execlist patches came into being. It will get removed once the basic
* code has landed.
*/
struct intel_context *FIXME_lrc_ctx;
 
u32 head;
u32 tail;
int space;
132,6 → 148,9
 
int (*init)(struct intel_engine_cs *ring);
 
int (*init_context)(struct intel_engine_cs *ring,
struct intel_context *ctx);
 
void (*write_tail)(struct intel_engine_cs *ring,
u32 value);
int __must_check (*flush)(struct intel_engine_cs *ring,
214,6 → 233,19
unsigned int num_dwords);
} semaphore;
 
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
u32 invalidate_domains,
u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
u64 offset, unsigned flags);
 
/**
* List of objects currently involved in rendering from the
* ringbuffer.
287,11 → 319,7
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
 
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->buffer && ring->buffer->obj;
}
bool intel_ring_initialized(struct intel_engine_cs *ring);
 
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
355,6 → 383,13
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf);
 
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
372,6 → 407,9
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
 
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
379,6 → 417,9
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
 
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
388,6 → 429,8
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
int init_workarounds_ring(struct intel_engine_cs *ring);
 
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
405,7 → 448,4
ring->trace_irq_seqno = seqno;
}
 
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
 
#endif /* _INTEL_RINGBUFFER_H_ */
/drivers/video/drm/i915/intel_runtime_pm.c
0,0 → 1,1406
/*
* Copyright © 2012-2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eugeni Dodonov <eugeni.dodonov@intel.com>
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
*/
 
//#include <linux/pm_runtime.h>
 
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/i915_powerwell.h>
#include <linux/vgaarb.h>
 
/**
* DOC: runtime pm
*
* The i915 driver supports dynamic enabling and disabling of entire hardware
* blocks at runtime. This is especially important on the display side where
* software is supposed to control many power gates manually on recent hardware,
* since on the GT side a lot of the power management is done by the hardware.
* But even there some manual control at the device level is required.
*
* Since i915 supports a diverse set of platforms with a unified codebase and
* hardware engineers just love to shuffle functionality around between power
* domains, there's a sizeable amount of indirection required. This file provides
* generic functions to the driver for grabbing and releasing references for
* abstract power domains. It then maps those to the actual power wells
* present for a given platform.
*/
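 
/*
 * A minimal usage sketch of the pattern described above (illustrative
 * only, compiled out): a caller that needs a display block powered takes
 * a reference on the matching abstract power domain, performs its
 * register access, and drops the reference again. The domain and the
 * register offset used here are placeholders.
 */
#if 0
static u32 example_powered_read(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;
 
	/* Grab a reference; this powers up the well backing the domain. */
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
 
	val = I915_READ(reg);
 
	/* Drop the reference; the well may now be powered down again. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
	return val;
}
#endif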
 
static struct i915_power_domains *hsw_pwr;
 
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
((power_well) = &(power_domains)->power_wells[i]); \
i++) \
if ((power_well)->domains & (domain_mask))
 
#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
for (i = (power_domains)->power_well_count - 1; \
i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
i--) \
if ((power_well)->domains & (domain_mask))
 
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
* be enabled.
*/
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return I915_READ(HSW_PWR_WELL_DRIVER) ==
(HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}
 
/**
* __intel_display_power_is_enabled - unlocked check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
* This is the unlocked version of intel_display_power_is_enabled() and should
* only be used from error capture and recovery code where deadlocks are
* possible.
*
* Returns:
* True when the power domain is enabled, false otherwise.
*/
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
int i;
 
if (dev_priv->pm.suspended)
return false;
 
power_domains = &dev_priv->power_domains;
 
is_enabled = true;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
if (power_well->always_on)
continue;
 
if (!power_well->hw_enabled) {
is_enabled = false;
break;
}
}
 
return is_enabled;
}
 
/**
* intel_display_power_is_enabled - unlocked check for a power domain
* @dev_priv: i915 device instance
* @domain: power domain to check
*
* This function can be used to check the hw power domain state. It is mostly
* used in hardware state readout functions. Everywhere else code should rely
* upon explicit power domain reference counting to ensure that the hardware
* block is powered up before accessing it.
*
* Callers must hold the relevant modesetting locks to ensure that concurrent
* threads can't disable the power well while the caller tries to read a few
* registers.
*
* Returns:
* True when the power domain is enabled, false otherwise.
*/
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
bool ret;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
ret = __intel_display_power_is_enabled(dev_priv, domain);
mutex_unlock(&power_domains->lock);
 
return ret;
}
 
/**
* intel_display_set_init_power - set the initial power domain state
* @dev_priv: i915 device instance
* @enable: whether to enable or disable the initial power domain state
*
* For simplicity our driver load/unload and system suspend/resume code assumes
* that all power domains are always enabled. This function controls the state
* of this little hack. While the initial power domain state is enabled, runtime
* pm is effectively disabled.
*/
void intel_display_set_init_power(struct drm_i915_private *dev_priv,
bool enable)
{
if (dev_priv->power_domains.init_power_on == enable)
return;
 
if (enable)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
else
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
 
dev_priv->power_domains.init_power_on = enable;
}
 
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
 
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
* we'll get unclaimed register interrupts. This stops after we write
* anything to the VGA MSR register. The vgacon module uses this
* register all the time, so if we unbind our driver and, as a
* consequence, bind vgacon, we'll get stuck in an infinite loop at
* console_unlock(). So here we touch the VGA MSR register, making
* sure vgacon can keep working normally without triggering interrupts
* and error messages.
*/
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
 
if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
gen8_irq_power_well_post_enable(dev_priv);
}
 
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
bool is_enabled, enable_requested;
uint32_t tmp;
 
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
 
if (enable) {
if (!enable_requested)
I915_WRITE(HSW_PWR_WELL_DRIVER,
HSW_PWR_WELL_ENABLE_REQUEST);
 
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
hsw_power_well_post_enable(dev_priv);
}
 
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
}
 
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
 
/*
* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now.
*/
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
 
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, true);
}
 
static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
hsw_set_power_well(dev_priv, power_well, false);
}
 
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
}
 
static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
return true;
}
 
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
PUNIT_PWRGT_PWR_GATE(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
 
if (COND)
goto out;
 
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
ctrl &= ~mask;
ctrl |= state;
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
 
#undef COND
 
out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}
 
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, true);
}
 
static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
vlv_set_power_well(dev_priv, power_well, false);
}
 
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
int power_well_id = power_well->data;
bool enabled = false;
u32 mask;
u32 state;
u32 ctrl;
 
mask = PUNIT_PWRGT_MASK(power_well_id);
ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
state != PUNIT_PWRGT_PWR_GATE(power_well_id));
if (state == ctrl)
enabled = true;
 
/*
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
WARN_ON(ctrl != state);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
return enabled;
}
 
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
vlv_set_power_well(dev_priv, power_well, true);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
if (dev_priv->power_domains.initializing)
return;
 
intel_hpd_init(dev_priv);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
 
static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
 
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
vlv_set_power_well(dev_priv, power_well, false);
 
vlv_power_sequencer_reset(dev_priv);
}
 
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
 
vlv_set_power_well(dev_priv, power_well, true);
 
/*
* From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
* 6. De-assert cmn_reset/side_reset. Same as VLV X0.
* a. GUnit 0x2110 bit[0] set to 1 (def 0)
* b. The other bits such as sfr settings / modesel may all
* be set to 0.
*
* This should only be done on init and resume from S3 with
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}
 
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum pipe pipe;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
 
for_each_pipe(dev_priv, pipe)
assert_pll_disabled(dev_priv, pipe);
 
/* Assert common reset */
I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum dpio_phy phy;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
 
/*
* Enable the CRI clock source so we can get at the
* display and the reference clock for VGA
* hotplug / manual detection.
*/
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV);
I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
} else {
phy = DPIO_PHY1;
I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
}
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
vlv_set_power_well(dev_priv, power_well, true);
 
/* Poll for phypwrgood signal */
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
 
I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
PHY_COM_LANE_RESET_DEASSERT(phy));
}
 
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum dpio_phy phy;
 
WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
 
if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
phy = DPIO_PHY0;
assert_pll_disabled(dev_priv, PIPE_A);
assert_pll_disabled(dev_priv, PIPE_B);
} else {
phy = DPIO_PHY1;
assert_pll_disabled(dev_priv, PIPE_C);
}
 
I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
~PHY_COM_LANE_RESET_DEASSERT(phy));
 
vlv_set_power_well(dev_priv, power_well, false);
}
 
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum pipe pipe = power_well->data;
bool enabled;
u32 state, ctrl;
 
mutex_lock(&dev_priv->rps.hw_lock);
 
state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
 
/*
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
 
mutex_unlock(&dev_priv->rps.hw_lock);
 
return enabled;
}
 
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well,
bool enable)
{
enum pipe pipe = power_well->data;
u32 state;
u32 ctrl;
 
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
 
mutex_lock(&dev_priv->rps.hw_lock);
 
#define COND \
((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
 
if (COND)
goto out;
 
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
 
if (wait_for(COND, 100))
DRM_ERROR("timout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
 
#undef COND
 
out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
}
 
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A &&
power_well->data != PIPE_B &&
power_well->data != PIPE_C);
 
chv_set_pipe_power_well(dev_priv, power_well, true);
 
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
 
/*
* During driver initialization/resume we can avoid restoring the
* part of the HW/SW state that will be inited anyway explicitly.
*/
if (dev_priv->power_domains.initializing)
return;
 
intel_hpd_init(dev_priv);
 
i915_redisable_vga_power_on(dev_priv->dev);
}
}
 
static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
WARN_ON_ONCE(power_well->data != PIPE_A &&
power_well->data != PIPE_B &&
power_well->data != PIPE_C);
 
if (power_well->data == PIPE_A) {
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
 
chv_set_pipe_power_well(dev_priv, power_well, false);
 
if (power_well->data == PIPE_A)
vlv_power_sequencer_reset(dev_priv);
}
 
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
 
if (power_well->always_on || !i915.disable_power_well) {
if (!enabled)
goto mismatch;
 
return;
}
 
if (enabled != (power_well->count > 0))
goto mismatch;
 
return;
 
mismatch:
WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
power_well->name, power_well->always_on, enabled,
power_well->count, i915.disable_power_well);
}
 
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function grabs a power domain reference for @domain and ensures that the
* power domain and all its parents are powered up. Therefore users should only
* grab a reference to the innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
intel_runtime_pm_get(dev_priv);
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
for_each_power_well(i, power_well, BIT(domain), power_domains) {
if (!power_well->count++) {
DRM_DEBUG_KMS("enabling %s\n", power_well->name);
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
 
check_power_well_state(dev_priv, power_well);
}
 
power_domains->domain_use_count[domain]++;
 
mutex_unlock(&power_domains->lock);
}
 
/**
* intel_display_power_put - release a power domain reference
* @dev_priv: i915 device instance
* @domain: power domain to reference
*
* This function drops the power domain reference obtained by
* intel_display_power_get() and might power down the corresponding hardware
* block right away if this is the last reference.
*/
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
int i;
 
power_domains = &dev_priv->power_domains;
 
mutex_lock(&power_domains->lock);
 
WARN_ON(!power_domains->domain_use_count[domain]);
power_domains->domain_use_count[domain]--;
 
for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
WARN_ON(!power_well->count);
 
if (!--power_well->count && i915.disable_power_well) {
DRM_DEBUG_KMS("disabling %s\n", power_well->name);
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
 
check_power_well_state(dev_priv, power_well);
}
 
mutex_unlock(&power_domains->lock);
 
intel_runtime_pm_put(dev_priv);
}
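
/*
 * Illustrative sketch added by the editor, not part of the driver: a typical
 * caller brackets its hardware access with a matching get/put pair on the
 * innermost power domain it needs. The function name is made up;
 * POWER_DOMAIN_AUDIO is used only because it already appears in this file.
 */
static void example_display_power_usage(struct drm_i915_private *dev_priv)
{
	/* powers up the audio domain and every power well it depends on */
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);

	/* ... touch registers that live inside this power well ... */

	/* drop the reference; the well may be switched off right away */
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}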
 
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
HSW_ALWAYS_ON_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
 
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_A_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_B_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_PIPE_C_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
 
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_always_on_power_well_noop,
.enable = i9xx_always_on_power_well_noop,
.disable = i9xx_always_on_power_well_noop,
.is_enabled = i9xx_always_on_power_well_enabled,
};
 
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
.sync_hw = chv_pipe_power_well_sync_hw,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
};
 
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = chv_dpio_cmn_power_well_enable,
.disable = chv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static struct i915_power_well i9xx_always_on_power_well[] = {
{
.name = "always-on",
.always_on = 1,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
};
 
static const struct i915_power_well_ops hsw_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = hsw_power_well_enable,
.disable = hsw_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
 
static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = HSW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = BDW_DISPLAY_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
},
};
 
static const struct i915_power_well_ops vlv_display_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_display_power_well_enable,
.disable = vlv_display_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_dpio_cmn_power_well_enable,
.disable = vlv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
.disable = vlv_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
 
static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
{
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &vlv_dpio_cmn_power_well_ops,
},
};
 
static struct i915_power_well chv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.ops = &i9xx_always_on_power_well_ops,
},
#if 0
{
.name = "display",
.domains = VLV_DISPLAY_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DISP2D,
.ops = &vlv_display_power_well_ops,
},
#endif
{
.name = "pipe-a",
/*
* FIXME: pipe A power well seems to be the new disp2d well.
* At least all registers seem to be housed there. Figure
* out if this is a temporary situation in pre-production
* hardware or a permanent state of affairs.
*/
.domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
#if 0
{
.name = "pipe-b",
.domains = CHV_PIPE_B_POWER_DOMAINS,
.data = PIPE_B,
.ops = &chv_pipe_power_well_ops,
},
{
.name = "pipe-c",
.domains = CHV_PIPE_C_POWER_DOMAINS,
.data = PIPE_C,
.ops = &chv_pipe_power_well_ops,
},
#endif
{
.name = "dpio-common-bc",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
/*
* XXX: cmnreset for one PHY seems to disturb the other.
* As a workaround keep both powered on at the same
* time for now.
*/
.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
#if 0
{
.name = "dpio-tx-b-01",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
},
{
.name = "dpio-tx-b-23",
.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
},
{
.name = "dpio-tx-c-01",
.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
},
{
.name = "dpio-tx-c-23",
.domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
},
{
.name = "dpio-tx-d-01",
.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
},
{
.name = "dpio-tx-d-23",
.domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
.ops = &vlv_dpio_power_well_ops,
.data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
},
#endif
};
 
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
enum punit_power_well power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
if (power_well->data == power_well_id)
return power_well;
}
 
return NULL;
}
 
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
})
 
/**
* intel_power_domains_init - initializes the power domain structures
* @dev_priv: i915 device instance
*
* Initializes the power domain structures for @dev_priv depending upon the
* supported platform.
*/
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
mutex_init(&power_domains->lock);
 
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_HASWELL(dev_priv->dev)) {
set_power_wells(power_domains, hsw_power_wells);
hsw_pwr = power_domains;
} else if (IS_BROADWELL(dev_priv->dev)) {
set_power_wells(power_domains, bdw_power_wells);
hsw_pwr = power_domains;
} else if (IS_CHERRYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv->dev)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
 
return 0;
}
 
static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
if (!intel_enable_rc6(dev))
return;
 
/* Make sure we're not suspended first. */
pm_runtime_get_sync(device);
pm_runtime_disable(device);
}
 
/**
* intel_power_domains_fini - finalizes the power domain structures
* @dev_priv: i915 device instance
*
* Finalizes the power domain structures for @dev_priv depending upon the
* supported platform. This function also disables runtime pm and ensures that
* the device stays powered up so that the driver can be reloaded.
*/
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_disable(dev_priv);
 
/* The i915.ko module is still not prepared to be loaded when
* the power well is not enabled, so just enable it in case
* we're going to unload/reload. */
intel_display_set_init_power(dev_priv, true);
 
hsw_pwr = NULL;
}
 
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
int i;
 
mutex_lock(&power_domains->lock);
for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
power_well->ops->sync_hw(dev_priv, power_well);
power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
power_well);
}
mutex_unlock(&power_domains->lock);
}
 
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
struct i915_power_well *disp2d =
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
 
/* If the display might be already active skip this */
if (cmn->ops->is_enabled(dev_priv, cmn) &&
disp2d->ops->is_enabled(dev_priv, disp2d) &&
I915_READ(DPIO_CTL) & DPIO_CMNRST)
return;
 
DRM_DEBUG_KMS("toggling display PHY side reset\n");
 
/* cmnlane needs DPLL registers */
disp2d->ops->enable(dev_priv, disp2d);
 
/*
* From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
* Need to assert and de-assert PHY SB reset by gating the
* common lane power, then un-gating it.
* Simply ungating isn't sufficient to fully reset the PHY and get
* the ports and lanes running.
*/
cmn->ops->disable(dev_priv, cmn);
}
 
/**
* intel_power_domains_init_hw - initialize hardware power domain state
* @dev_priv: i915 device instance
*
* This function initializes the hardware power domain state and enables all
* power domains using intel_display_set_init_power().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
power_domains->initializing = true;
 
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
}
 
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
power_domains->initializing = false;
}
 
/**
* intel_aux_display_runtime_get - grab an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function grabs a power domain reference for the auxiliary power domain
* (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
* parents are powered up. Therefore users should only grab a reference to the
* innermost power domain they need.
*
* Any power domain reference obtained by this function must have a symmetric
* call to intel_aux_display_runtime_put() to release the reference again.
*/
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_get(dev_priv);
}
 
/**
* intel_aux_display_runtime_put - release an auxiliary power domain reference
* @dev_priv: i915 device instance
*
* This function drops the auxiliary power domain reference obtained by
* intel_aux_display_runtime_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
intel_runtime_pm_put(dev_priv);
}
 
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*/
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
pm_runtime_get_sync(device);
WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
 
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
*
* It will _not_ power up the device but instead only check that it's powered
* on. Therefore it is only valid to call this function from contexts where
* the device is known to be powered up and where trying to power it up would
* result in hilarity and deadlocks. That pretty much means only the system
* suspend/resume code where this is used to grab runtime pm references for
* delayed setup down in work items.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*/
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
// pm_runtime_get_noresume(device);
}
 
/**
* intel_runtime_pm_put - release a runtime pm reference
* @dev_priv: i915 device instance
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
// pm_runtime_mark_last_busy(device);
// pm_runtime_put_autosuspend(device);
}
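
/*
 * Illustrative sketch added by the editor, not part of the driver: GEM code
 * that needs the GT or GTT powered up takes a device-level runtime pm
 * reference around the access and drops it afterwards. The function name is
 * made up for illustration.
 */
static void example_runtime_pm_usage(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	/* ... GT/GTT register access goes here ... */

	intel_runtime_pm_put(dev_priv);
}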
 
/**
* intel_runtime_pm_enable - enable runtime pm
* @dev_priv: i915 device instance
*
* This function enables runtime pm at the end of the driver load sequence.
*
* Note that this function currently does not enable runtime pm for the
* subordinate display power domains. That is only done on the first modeset
* using intel_display_set_init_power().
*/
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
struct device *device = &dev->pdev->dev;
 
if (!HAS_RUNTIME_PM(dev))
return;
 
pm_runtime_set_active(device);
 
/*
* RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
* requirement.
*/
if (!intel_enable_rc6(dev)) {
DRM_INFO("RC6 disabled, disabling runtime PM support\n");
return;
}
 
// pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
// pm_runtime_mark_last_busy(device);
// pm_runtime_use_autosuspend(device);
 
// pm_runtime_put_autosuspend(device);
}
 
/* Display audio driver power well request */
int i915_request_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
 
/* Display audio driver power well release */
int i915_release_power_well(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
return 0;
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
 
/*
* Private interface for the audio driver to get CDCLK in kHz.
*
* Caller must request power well using i915_request_power_well() prior to
* making the call.
*/
int i915_get_cdclk_freq(void)
{
struct drm_i915_private *dev_priv;
 
if (!hsw_pwr)
return -ENODEV;
 
dev_priv = container_of(hsw_pwr, struct drm_i915_private,
power_domains);
 
return intel_ddi_get_cdclk_freq(dev_priv);
}
EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
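
/*
 * Illustrative sketch added by the editor, not part of the driver: the call
 * sequence expected from the audio driver side, per the comment above --
 * request the power well, query CDCLK, then release the well. The function
 * name is made up for illustration.
 */
static int example_audio_cdclk_query(void)
{
	int freq;

	if (i915_request_power_well())
		return -ENODEV;

	freq = i915_get_cdclk_freq();

	i915_release_power_well();

	return freq;
}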
/drivers/video/drm/i915/intel_sdvo.c
1991,57 → 1991,10
return !list_empty(&connector->probed_modes);
}
 
static void
intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
struct drm_device *dev = connector->dev;
 
if (intel_sdvo_connector->left)
drm_property_destroy(dev, intel_sdvo_connector->left);
if (intel_sdvo_connector->right)
drm_property_destroy(dev, intel_sdvo_connector->right);
if (intel_sdvo_connector->top)
drm_property_destroy(dev, intel_sdvo_connector->top);
if (intel_sdvo_connector->bottom)
drm_property_destroy(dev, intel_sdvo_connector->bottom);
if (intel_sdvo_connector->hpos)
drm_property_destroy(dev, intel_sdvo_connector->hpos);
if (intel_sdvo_connector->vpos)
drm_property_destroy(dev, intel_sdvo_connector->vpos);
if (intel_sdvo_connector->saturation)
drm_property_destroy(dev, intel_sdvo_connector->saturation);
if (intel_sdvo_connector->contrast)
drm_property_destroy(dev, intel_sdvo_connector->contrast);
if (intel_sdvo_connector->hue)
drm_property_destroy(dev, intel_sdvo_connector->hue);
if (intel_sdvo_connector->sharpness)
drm_property_destroy(dev, intel_sdvo_connector->sharpness);
if (intel_sdvo_connector->flicker_filter)
drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
if (intel_sdvo_connector->flicker_filter_2d)
drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
if (intel_sdvo_connector->flicker_filter_adaptive)
drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
if (intel_sdvo_connector->tv_luma_filter)
drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
if (intel_sdvo_connector->tv_chroma_filter)
drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
if (intel_sdvo_connector->dot_crawl)
drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
if (intel_sdvo_connector->brightness)
drm_property_destroy(dev, intel_sdvo_connector->brightness);
}
 
static void intel_sdvo_destroy(struct drm_connector *connector)
{
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
 
if (intel_sdvo_connector->tv_format)
drm_property_destroy(connector->dev,
intel_sdvo_connector->tv_format);
 
intel_sdvo_destroy_enhance_property(connector);
drm_connector_cleanup(connector);
kfree(intel_sdvo_connector);
}
/drivers/video/drm/i915/intel_sprite.c
37,6 → 37,20
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
static bool
format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
return true;
default:
return false;
}
}
 
static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
/* paranoia */
46,7 → 60,23
return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}
 
static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
* @start_vbl_count: vblank counter return pointer used for error checking
*
* Mark the start of an update to pipe registers that should be updated
* atomically with respect to vblank. If the next vblank happens within
* the next 100 us, this function waits until the vblank passes.
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays. The value written to @start_vbl_count should be
* supplied to intel_pipe_update_end() for error checking.
*
* Return: true if the call was successful
*/
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
53,10 → 83,9
enum pipe pipe = crtc->pipe;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
 
WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
 
vblank_start = mode->crtc_vblank_start;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
81,7 → 110,7
* other CPUs can see the task state update by the time we
* read the scanline.
*/
prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
 
scanline = intel_get_crtc_scanline(crtc);
if (scanline < min || scanline > max)
100,7 → 129,7
// local_irq_disable();
}
 
finish_wait(&crtc->vbl_wait, &wait);
finish_wait(wq, &wait);
 
// drm_vblank_put(dev, pipe);
 
111,7 → 140,16
return true;
}
 
static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
/**
* intel_pipe_update_end() - end update of a set of display registers
* @crtc: the crtc of which the registers were updated
* @start_vbl_count: start vblank counter (used for error checking)
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
*/
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe;
138,6 → 176,226
}
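
/*
 * Illustrative sketch added by the editor, not part of the driver: plane
 * update paths bracket their double-buffered register writes between
 * intel_pipe_update_start() and intel_pipe_update_end(), mirroring the
 * pattern used by the vlv/ivb/ilk update_plane functions below. The
 * function name is made up for illustration.
 */
static void example_pipe_update(struct intel_crtc *crtc)
{
	uint32_t start_vbl_count;
	bool atomic_update;

	atomic_update = intel_pipe_update_start(crtc, &start_vbl_count);

	/* ... write the plane registers for this pipe here ... */

	if (atomic_update)
		intel_pipe_update_end(crtc, start_vbl_count);
}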
 
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
u32 plane_ctl, stride;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
 
/* Mask out pixel format bits in case we change it */
plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
plane_ctl &= ~PLANE_CTL_TILED_MASK;
plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
 
/* Trickle feed has to be enabled */
plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
 
switch (fb->pixel_format) {
case DRM_FORMAT_RGB565:
plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XBGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
break;
case DRM_FORMAT_XRGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
break;
/*
* XXX: For ARGB/ABGR formats we default to expecting scanout buffers
* to be already pre-multiplied. We need to add a knob (or a different
* DRM_FORMAT) for user-space to configure that.
*/
case DRM_FORMAT_ABGR8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ORDER_RGBX |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_ARGB8888:
plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_YUYV:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
break;
case DRM_FORMAT_YVYU:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
break;
case DRM_FORMAT_UYVY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
break;
case DRM_FORMAT_VYUY:
plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
break;
default:
BUG();
}
 
switch (obj->tiling_mode) {
case I915_TILING_NONE:
stride = fb->pitches[0] >> 6;
break;
case I915_TILING_X:
plane_ctl |= PLANE_CTL_TILED_X;
stride = fb->pitches[0] >> 9;
break;
default:
BUG();
}
if (intel_plane->rotation == BIT(DRM_ROTATE_180))
plane_ctl |= PLANE_CTL_ROTATE_180;
 
plane_ctl |= PLANE_CTL_ENABLE;
plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
 
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
 
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
 
I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
POSTING_READ(PLANE_SURF(pipe, plane));
}
 
static void
skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
 
I915_WRITE(PLANE_CTL(pipe, plane),
I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
 
/* Activate double buffered register update */
I915_WRITE(PLANE_CTL(pipe, plane), 0);
POSTING_READ(PLANE_CTL(pipe, plane));
 
intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}
 
static int
skl_update_colorkey(struct drm_plane *drm_plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane;
u32 plane_ctl;
 
I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
 
POSTING_READ(PLANE_CTL(pipe, plane));
 
return 0;
}
 
static void
skl_get_colorkey(struct drm_plane *drm_plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = drm_plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane;
u32 plane_ctl;
 
key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
 
plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
 
switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
case PLANE_CTL_KEY_ENABLE_DESTINATION:
key->flags = I915_SET_COLORKEY_DESTINATION;
break;
case PLANE_CTL_KEY_ENABLE_SOURCE:
key->flags = I915_SET_COLORKEY_SOURCE;
break;
default:
key->flags = I915_SET_COLORKEY_NONE;
}
}
 
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
int plane = intel_plane->plane;
 
/* Seems RGB data bypasses the CSC always */
if (!format_is_yuv(format))
return;
 
/*
* BT.601 limited range YCbCr -> full range RGB
*
* |r| | 6537 4769 0| |cr |
* |g| = |-3330 4769 -1605| x |y-64|
* |b| | 0 4769 8263| |cb |
*
* Cb and Cr apparently come in as signed already, so no
* need for any offset. For Y we need to remove the offset.
*/
I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
 
I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
 
I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
 
I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
 
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
163,6 → 421,7
sprctl &= ~SP_PIXFORMAT_MASK;
sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
sprctl &= ~SP_TILED;
sprctl &= ~SP_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_YUYV:
235,10 → 494,21
fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
 
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
 
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->pixel_format);
 
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
 
247,6 → 517,8
else
I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
 
I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
 
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
364,6 → 636,7
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
sprctl &= ~SPRITE_TILED;
sprctl &= ~SPRITE_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
426,6 → 699,18
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
 
/* HSW and BDW do this automagically in hardware */
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] +
src_w * pixel_size;
}
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
571,6 → 856,7
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
dvscntr &= ~DVS_TILED;
dvscntr &= ~DVS_ROTATE_180;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
628,6 → 914,14
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
 
x += src_w;
y += src_h;
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
 
atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
 
intel_update_primary_plane(intel_crtc);
694,6 → 988,14
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
if (IS_BROADWELL(dev))
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
/*
* FIXME IPS should be fine as long as one plane is
* enabled, but in practice it seems to have problems
* when going from primary only to sprite only and vice
781,20 → 1083,6
key->flags = I915_SET_COLORKEY_NONE;
}
 
static bool
format_is_yuv(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YVYU:
return true;
default:
return false;
}
}
 
static bool colorkey_enabled(struct intel_plane *intel_plane)
{
struct drm_intel_sprite_colorkey key;
805,57 → 1093,23
}
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
bool primary_enabled;
bool visible;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *src = &state->src;
struct drm_rect *dst = &state->dst;
struct drm_rect *orig_src = &state->orig_src;
const struct drm_rect *clip = &state->clip;
int hscale, vscale;
int max_scale, min_scale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
struct drm_rect src = {
/* sample coordinates in 16.16 fixed point */
.x1 = src_x,
.x2 = src_x + src_w,
.y1 = src_y,
.y2 = src_y + src_h,
};
struct drm_rect dst = {
/* integer pixels */
.x1 = crtc_x,
.x2 = crtc_x + crtc_w,
.y1 = crtc_y,
.y2 = crtc_y + crtc_h,
};
const struct drm_rect clip = {
.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
const struct {
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
} orig = {
.crtc_x = crtc_x,
.crtc_y = crtc_y,
.crtc_w = crtc_w,
.crtc_h = crtc_h,
.src_x = src_x,
.src_y = src_y,
.src_w = src_w,
.src_h = src_h,
};
 
/* Don't modify another pipe's plane */
if (intel_plane->pipe != intel_crtc->pipe) {
887,49 → 1141,55
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
 
hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
 
hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(hscale < 0);
 
vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
BUG_ON(vscale < 0);
 
visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
 
crtc_x = dst.x1;
crtc_y = dst.y1;
crtc_w = drm_rect_width(&dst);
crtc_h = drm_rect_height(&dst);
crtc_x = dst->x1;
crtc_y = dst->y1;
crtc_w = drm_rect_width(dst);
crtc_h = drm_rect_height(dst);
 
if (visible) {
if (state->visible) {
/* check again in case clipping clamped the results */
hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
if (hscale < 0) {
DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
drm_rect_debug_print(src, true);
drm_rect_debug_print(dst, false);
 
return hscale;
}
 
vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (vscale < 0) {
DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
drm_rect_debug_print(&src, true);
drm_rect_debug_print(&dst, false);
drm_rect_debug_print(src, true);
drm_rect_debug_print(dst, false);
 
return vscale;
}
 
/* Make the source viewport size an exact multiple of the scaling factors. */
drm_rect_adjust_size(&src,
drm_rect_width(&dst) * hscale - drm_rect_width(&src),
drm_rect_height(&dst) * vscale - drm_rect_height(&src));
drm_rect_adjust_size(src,
drm_rect_width(dst) * hscale - drm_rect_width(src),
drm_rect_height(dst) * vscale - drm_rect_height(src));
 
drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
intel_plane->rotation);
 
/* sanity check to make sure the src viewport wasn't enlarged */
WARN_ON(src.x1 < (int) src_x ||
src.y1 < (int) src_y ||
src.x2 > (int) (src_x + src_w) ||
src.y2 > (int) (src_y + src_h));
WARN_ON(src->x1 < (int) orig_src->x1 ||
src->y1 < (int) orig_src->y1 ||
src->x2 > (int) orig_src->x2 ||
src->y2 > (int) orig_src->y2);
 
/*
* Hardware doesn't handle subpixel coordinates.
937,10 → 1197,10
* increase the source viewport size, because that could
* push the downscaling factor out of bounds.
*/
src_x = src.x1 >> 16;
src_w = drm_rect_width(&src) >> 16;
src_y = src.y1 >> 16;
src_h = drm_rect_height(&src) >> 16;
src_x = src->x1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
 
if (format_is_yuv(fb->pixel_format)) {
src_x &= ~1;
954,12 → 1214,12
crtc_w &= ~1;
 
if (crtc_w == 0)
visible = false;
state->visible = false;
}
}
 
/* Check size restrictions when scaling */
if (visible && (src_w != crtc_w || src_h != crtc_h)) {
if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
 
WARN_ON(!intel_plane->can_scale);
967,12 → 1227,13
/* FIXME interlacing min height is 6 */
 
if (crtc_w < 3 || crtc_h < 3)
visible = false;
state->visible = false;
 
if (src_w < 3 || src_h < 3)
visible = false;
state->visible = false;
 
width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
 
if (src_w > 2048 || src_h > 2048 ||
width_bytes > 4096 || fb->pitches[0] > 4096) {
981,42 → 1242,90
}
}
 
dst.x1 = crtc_x;
dst.x2 = crtc_x + crtc_w;
dst.y1 = crtc_y;
dst.y2 = crtc_y + crtc_h;
if (state->visible) {
src->x1 = src_x;
src->x2 = src_x + src_w;
src->y1 = src_y;
src->y2 = src_y + src_h;
}
 
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
WARN_ON(!primary_enabled && !visible && intel_crtc->active);
dst->x1 = crtc_x;
dst->x2 = crtc_x + crtc_w;
dst->y1 = crtc_y;
dst->y2 = crtc_y + crtc_h;
 
return 0;
}
 
static int
intel_prepare_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int ret;
 
if (old_obj != obj) {
mutex_lock(&dev->struct_mutex);
 
/* Note that this will apply the VT-d workaround for scanouts,
* which is more restrictive than required for sprites. (The
* primary plane requires 256KiB alignment with 64 PTE padding,
* the sprite planes only require 128KiB alignment and 32 PTE padding.
* the sprite planes only require 128KiB alignment and 32 PTE
* padding.)
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 
ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
if (ret == 0)
i915_gem_track_fb(old_obj, obj,
INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
 
if (ret)
return ret;
}
 
intel_plane->crtc_x = orig.crtc_x;
intel_plane->crtc_y = orig.crtc_y;
intel_plane->crtc_w = orig.crtc_w;
intel_plane->crtc_h = orig.crtc_h;
intel_plane->src_x = orig.src_x;
intel_plane->src_y = orig.src_y;
intel_plane->src_w = orig.src_w;
intel_plane->src_h = orig.src_h;
return 0;
}
 
static void
intel_commit_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
struct drm_device *dev = plane->dev;
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
enum pipe pipe = intel_crtc->pipe;
struct drm_framebuffer *fb = state->fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_plane->obj;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y, src_w, src_h;
struct drm_rect *dst = &state->dst;
const struct drm_rect *clip = &state->clip;
bool primary_enabled;
 
/*
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
 
intel_plane->crtc_x = state->orig_dst.x1;
intel_plane->crtc_y = state->orig_dst.y1;
intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
intel_plane->src_x = state->orig_src.x1;
intel_plane->src_y = state->orig_src.y1;
intel_plane->src_w = drm_rect_width(&state->orig_src);
intel_plane->src_h = drm_rect_height(&state->orig_src);
intel_plane->obj = obj;
 
if (intel_crtc->active) {
1025,23 → 1334,37
intel_crtc->primary_enabled = primary_enabled;
 
// if (primary_was_enabled != primary_enabled)
// intel_crtc_wait_for_pending_flips(crtc);
 
if (primary_was_enabled && !primary_enabled)
intel_pre_disable_primary(crtc);
 
if (visible)
if (state->visible) {
crtc_x = state->dst.x1;
crtc_y = state->dst.y1;
crtc_w = drm_rect_width(&state->dst);
crtc_h = drm_rect_height(&state->dst);
src_x = state->src.x1;
src_y = state->src.y1;
src_w = drm_rect_width(&state->src);
src_h = drm_rect_height(&state->src);
intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
} else {
intel_plane->disable_plane(plane, crtc);
}
 
 
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
 
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
 
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
if (old_obj && old_obj != obj) {
 
/*
* It's fairly common to simply update the position of
* an existing object. In that case, we don't need to
1048,7 → 1371,7
* wait for vblank to avoid ugliness, we only need to
* do the pin & ref bookkeeping.
*/
if (old_obj != obj && intel_crtc->active)
if (intel_crtc->active)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
mutex_lock(&dev->struct_mutex);
1055,7 → 1378,50
intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
}
 
static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
struct intel_plane_state state;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
 
state.crtc = crtc;
state.fb = fb;
 
/* sample coordinates in 16.16 fixed point */
state.src.x1 = src_x;
state.src.x2 = src_x + src_w;
state.src.y1 = src_y;
state.src.y2 = src_y + src_h;
 
/* integer pixels */
state.dst.x1 = crtc_x;
state.dst.x2 = crtc_x + crtc_w;
state.dst.y1 = crtc_y;
state.dst.y2 = crtc_y + crtc_h;
 
state.clip.x1 = 0;
state.clip.y1 = 0;
state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
state.orig_src = state.src;
state.orig_dst = state.dst;
 
ret = intel_check_sprite_plane(plane, &state);
if (ret)
return ret;
 
ret = intel_prepare_sprite_plane(plane, &state);
if (ret)
return ret;
 
intel_commit_sprite_plane(plane, &state);
return 0;
}
 
1169,14 → 1535,41
return ret;
}
 
void intel_plane_restore(struct drm_plane *plane)
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
uint64_t old_val;
int ret = -ENOENT;
 
if (prop == dev->mode_config.rotation_property) {
/* exactly one rotation angle please */
if (hweight32(val & 0xf) != 1)
return -EINVAL;
 
if (intel_plane->rotation == val)
return 0;
 
old_val = intel_plane->rotation;
intel_plane->rotation = val;
ret = intel_plane_restore(plane);
if (ret)
intel_plane->rotation = old_val;
}
 
return ret;
}
 
int intel_plane_restore(struct drm_plane *plane)
{
struct intel_plane *intel_plane = to_intel_plane(plane);
 
if (!plane->crtc || !plane->fb)
return;
return 0;
 
intel_update_plane(plane, plane->crtc, plane->fb,
return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
intel_plane->crtc_x, intel_plane->crtc_y,
intel_plane->crtc_w, intel_plane->crtc_h,
intel_plane->src_x, intel_plane->src_y,
1195,6 → 1588,7
.update_plane = intel_update_plane,
.disable_plane = intel_disable_plane,
.destroy = intel_destroy_plane,
.set_property = intel_plane_set_property,
};
 
static uint32_t ilk_plane_formats[] = {
1228,6 → 1622,18
DRM_FORMAT_VYUY,
};
 
static uint32_t skl_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
 
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
1291,7 → 1697,21
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
}
break;
case 9:
/*
* FIXME: Skylake planes can be scaled (with some restrictions),
* but this is for another time.
*/
intel_plane->can_scale = false;
intel_plane->max_downscale = 1;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
intel_plane->update_colorkey = skl_update_colorkey;
intel_plane->get_colorkey = skl_get_colorkey;
 
plane_formats = skl_plane_formats;
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
break;
default:
kfree(intel_plane);
return -ENODEV;
1299,13 → 1719,28
 
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->rotation = BIT(DRM_ROTATE_0);
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
false);
if (ret)
DRM_PLANE_TYPE_OVERLAY);
if (ret) {
kfree(intel_plane);
goto out;
}
 
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_180));
 
if (dev->mode_config.rotation_property)
drm_object_attach_property(&intel_plane->base.base,
dev->mode_config.rotation_property,
intel_plane->rotation);
 
out:
return ret;
}
/drivers/video/drm/i915/intel_uncore.c
43,23 → 43,17
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
"Device suspended\n");
}
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
 
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
 
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
 
101,7 → 95,7
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
120,7 → 114,6
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
/* WaRsForcewakeWaitTC0:ivb,hsw */
if (INTEL_INFO(dev_priv->dev)->gen < 8)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
229,10 → 222,6
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
 
/* WaRsForcewakeWaitTC0:vlv */
if (!IS_CHERRYVIEW(dev_priv->dev))
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
299,6 → 288,154
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(0xffff));
 
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
}
 
static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
}
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Media to ack.\n");
}
 
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
 
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
}
}
 
static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
 
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}
 
static void
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (FORCEWAKE_RENDER & fw_engine) {
if (dev_priv->uncore.fw_rendercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_RENDER);
}
 
if (FORCEWAKE_MEDIA & fw_engine) {
if (dev_priv->uncore.fw_mediacount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_MEDIA);
}
 
if (FORCEWAKE_BLITTER & fw_engine) {
if (dev_priv->uncore.fw_blittercount++ == 0)
dev_priv->uncore.funcs.force_wake_get(dev_priv,
FORCEWAKE_BLITTER);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
static void
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
if (FORCEWAKE_RENDER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_rendercount == 0);
if (--dev_priv->uncore.fw_rendercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_RENDER);
}
 
if (FORCEWAKE_MEDIA & fw_engine) {
WARN_ON(dev_priv->uncore.fw_mediacount == 0);
if (--dev_priv->uncore.fw_mediacount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_MEDIA);
}
 
if (FORCEWAKE_BLITTER & fw_engine) {
WARN_ON(dev_priv->uncore.fw_blittercount == 0);
if (--dev_priv->uncore.fw_blittercount == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv,
FORCEWAKE_BLITTER);
}
 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
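/*
 * Illustrative sketch, not part of this revision: with the per-engine
 * reference counts above, only the 0 -> 1 get and the 1 -> 0 put touch the
 * FORCEWAKE_*_GEN9 registers; nested calls just adjust the counter.  The
 * caller below is hypothetical.
 */
static void example_render_mmio_access(struct drm_i915_private *dev_priv)
{
        gen9_force_wake_get(dev_priv, FORCEWAKE_RENDER); /* 0 -> 1: wakes the render well */
        gen9_force_wake_get(dev_priv, FORCEWAKE_RENDER); /* 1 -> 2: counter only */

        /* ... MMIO access to render-domain registers ... */

        gen9_force_wake_put(dev_priv, FORCEWAKE_RENDER); /* 2 -> 1: counter only */
        gen9_force_wake_put(dev_priv, FORCEWAKE_RENDER); /* 1 -> 0: releases the well */
}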
 
static void gen6_force_wake_timer(unsigned long arg)
{
struct drm_i915_private *dev_priv = (void *)arg;
334,9 → 471,12
else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
 
if (IS_GEN9(dev))
__gen9_gt_force_wake_mt_reset(dev_priv);
 
if (restore) { /* If reset with a user forcewake, try to restore */
unsigned fw = 0;
 
346,6 → 486,15
 
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
} else if (IS_GEN9(dev)) {
if (dev_priv->uncore.fw_rendercount)
fw |= FORCEWAKE_RENDER;
 
if (dev_priv->uncore.fw_mediacount)
fw |= FORCEWAKE_MEDIA;
 
if (dev_priv->uncore.fw_blittercount)
fw |= FORCEWAKE_BLITTER;
} else {
if (dev_priv->uncore.forcewake_count)
fw = FORCEWAKE_ALL;
363,7 → 512,8
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
 
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
static void __intel_uncore_early_sanitize(struct drm_device *dev,
bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
389,6 → 539,12
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
 
void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
__intel_uncore_early_sanitize(dev, restore_forcewake);
i915_check_and_clear_faults(dev);
}
 
void intel_uncore_sanitize(struct drm_device *dev)
{
/* BIOS often leaves RC6 enabled, but disable it for hw init */
410,6 → 566,10
 
intel_runtime_pm_get(dev_priv);
 
/* Redirect to Gen9 specific routine */
if (IS_GEN9(dev_priv->dev))
return gen9_force_wake_get(dev_priv, fw_engine);
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev))
return vlv_force_wake_get(dev_priv, fw_engine);
431,6 → 591,12
if (!dev_priv->uncore.funcs.force_wake_put)
return;
 
/* Redirect to Gen9 specific routine */
if (IS_GEN9(dev_priv->dev)) {
gen9_force_wake_put(dev_priv, fw_engine);
goto out;
}
 
/* Redirect to VLV specific routine */
if (IS_VALLEYVIEW(dev_priv->dev)) {
vlv_force_wake_put(dev_priv, fw_engine);
504,6 → 670,38
REG_RANGE((reg), 0x14000, 0x14400) || \
REG_RANGE((reg), 0x22000, 0x24000))
 
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
 
#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x2700) || \
REG_RANGE((reg), 0x3000, 0x4000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
REG_RANGE((reg), 0x8140, 0x8160) || \
REG_RANGE((reg), 0x8300, 0x8500) || \
REG_RANGE((reg), 0x8C00, 0x8D00) || \
REG_RANGE((reg), 0xB000, 0xB480) || \
REG_RANGE((reg), 0xE000, 0xE900) || \
REG_RANGE((reg), 0x24400, 0x24800))
 
#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x8130, 0x8140) || \
REG_RANGE((reg), 0x8800, 0x8A00) || \
REG_RANGE((reg), 0xD000, 0xD800) || \
REG_RANGE((reg), 0x12000, 0x14000) || \
REG_RANGE((reg), 0x1A000, 0x1EA00) || \
REG_RANGE((reg), 0x30000, 0x40000))
 
#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0x9400, 0x9800)
 
#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
((reg) < 0x40000 && \
!FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
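/*
 * Illustrative sketch, not part of this revision: how the range macros above
 * partition the Gen9 MMIO space into forcewake domains.  The helper name and
 * return convention are hypothetical; registers in the common range need both
 * the render and the media wells.
 */
static inline unsigned gen9_reg_to_fw_domains(u32 reg)
{
        if (reg >= 0x40000 || FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
                return 0;                                /* no forcewake needed */
        if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))
                return FORCEWAKE_RENDER;
        if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg))
                return FORCEWAKE_MEDIA;
        if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
                return FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
        return FORCEWAKE_BLITTER;                        /* everything else below 0x40000 */
}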
 
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
634,6 → 832,45
REG_READ_FOOTER; \
}
 
#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
 
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
val = __raw_i915_read##x(dev_priv, reg); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
val = __raw_i915_read##x(dev_priv, reg); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
} \
REG_READ_FOOTER; \
}
 
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
655,6 → 892,7
__gen4_read(32)
__gen4_read(64)
 
#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
792,6 → 1030,69
REG_WRITE_FOOTER; \
}
 
static const u32 gen9_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_MEDIA_GEN9,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
/* TODO: Other registers are not yet used */
};
 
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
if (reg == gen9_shadowed_regs[i])
return true;
 
return false;
}
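/*
 * Illustrative note, not part of this revision: writes to the shadowed
 * registers above are latched by the hardware even while the GT is asleep,
 * so gen9_write##x() below skips the forcewake dance for them, e.g.:
 *
 *      is_gen9_shadowed(dev_priv, RING_TAIL(RENDER_RING_BASE))  -> true
 *      is_gen9_shadowed(dev_priv, GEN6_RPNSWREQ)                -> true
 *      is_gen9_shadowed(dev_priv, RENDER_RING_BASE)             -> false
 */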
 
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
bool trace) { \
REG_WRITE_HEADER; \
if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
is_gen9_shadowed(dev_priv, reg)) { \
__raw_i915_write##x(dev_priv, reg, val); \
} else { \
unsigned fwengine = 0; \
if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine = FORCEWAKE_RENDER; \
} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine = FORCEWAKE_MEDIA; \
} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
if (dev_priv->uncore.fw_rendercount == 0) \
fwengine |= FORCEWAKE_RENDER; \
if (dev_priv->uncore.fw_mediacount == 0) \
fwengine |= FORCEWAKE_MEDIA; \
} else { \
if (dev_priv->uncore.fw_blittercount == 0) \
fwengine = FORCEWAKE_BLITTER; \
} \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
fwengine); \
__raw_i915_write##x(dev_priv, reg, val); \
if (fwengine) \
dev_priv->uncore.funcs.force_wake_put(dev_priv, \
fwengine); \
} \
REG_WRITE_FOOTER; \
}
 
__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
817,6 → 1118,7
__gen4_write(32)
__gen4_write(64)
 
#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
826,6 → 1128,22
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER
 
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
dev_priv->uncore.funcs.mmio_writew = x##_write16; \
dev_priv->uncore.funcs.mmio_writel = x##_write32; \
dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)
 
#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_readb = x##_read8; \
dev_priv->uncore.funcs.mmio_readw = x##_read16; \
dev_priv->uncore.funcs.mmio_readl = x##_read32; \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
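/*
 * Illustrative expansion, not part of this revision: the two helpers above
 * only collapse the repetitive vfunc hook-up used in intel_uncore_init()
 * below; ASSIGN_READ_MMIO_VFUNCS(gen9), for example, assigns
 *
 *      dev_priv->uncore.funcs.mmio_readb = gen9_read8;
 *      dev_priv->uncore.funcs.mmio_readw = gen9_read16;
 *      dev_priv->uncore.funcs.mmio_readl = gen9_read32;
 *      dev_priv->uncore.funcs.mmio_readq = gen9_read64;
 *
 * inside a do { } while (0) block.
 */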
 
void intel_uncore_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
833,12 → 1151,15
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
 
intel_uncore_early_sanitize(dev, false);
__intel_uncore_early_sanitize(dev, false);
 
if (IS_VALLEYVIEW(dev)) {
if (IS_GEN9(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) {
881,77 → 1202,52
 
switch (INTEL_INFO(dev)->gen) {
default:
WARN_ON(1);
return;
case 9:
ASSIGN_WRITE_MMIO_VFUNCS(gen9);
ASSIGN_READ_MMIO_VFUNCS(gen9);
break;
case 8:
if (IS_CHERRYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_writeb = chv_write8;
dev_priv->uncore.funcs.mmio_writew = chv_write16;
dev_priv->uncore.funcs.mmio_writel = chv_write32;
dev_priv->uncore.funcs.mmio_writeq = chv_write64;
dev_priv->uncore.funcs.mmio_readb = chv_read8;
dev_priv->uncore.funcs.mmio_readw = chv_read16;
dev_priv->uncore.funcs.mmio_readl = chv_read32;
dev_priv->uncore.funcs.mmio_readq = chv_read64;
ASSIGN_WRITE_MMIO_VFUNCS(chv);
ASSIGN_READ_MMIO_VFUNCS(chv);
 
} else {
dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
dev_priv->uncore.funcs.mmio_writew = gen8_write16;
dev_priv->uncore.funcs.mmio_writel = gen8_write32;
dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 7:
case 6:
if (IS_HASWELL(dev)) {
dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
dev_priv->uncore.funcs.mmio_writew = hsw_write16;
dev_priv->uncore.funcs.mmio_writel = hsw_write32;
dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
ASSIGN_WRITE_MMIO_VFUNCS(hsw);
} else {
dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
dev_priv->uncore.funcs.mmio_writew = gen6_write16;
dev_priv->uncore.funcs.mmio_writel = gen6_write32;
dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
ASSIGN_WRITE_MMIO_VFUNCS(gen6);
}
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.mmio_readb = vlv_read8;
dev_priv->uncore.funcs.mmio_readw = vlv_read16;
dev_priv->uncore.funcs.mmio_readl = vlv_read32;
dev_priv->uncore.funcs.mmio_readq = vlv_read64;
ASSIGN_READ_MMIO_VFUNCS(vlv);
} else {
dev_priv->uncore.funcs.mmio_readb = gen6_read8;
dev_priv->uncore.funcs.mmio_readw = gen6_read16;
dev_priv->uncore.funcs.mmio_readl = gen6_read32;
dev_priv->uncore.funcs.mmio_readq = gen6_read64;
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
break;
case 5:
dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
dev_priv->uncore.funcs.mmio_writew = gen5_write16;
dev_priv->uncore.funcs.mmio_writel = gen5_write32;
dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
dev_priv->uncore.funcs.mmio_readb = gen5_read8;
dev_priv->uncore.funcs.mmio_readw = gen5_read16;
dev_priv->uncore.funcs.mmio_readl = gen5_read32;
dev_priv->uncore.funcs.mmio_readq = gen5_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen5);
ASSIGN_READ_MMIO_VFUNCS(gen5);
break;
case 4:
case 3:
case 2:
dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
dev_priv->uncore.funcs.mmio_writew = gen4_write16;
dev_priv->uncore.funcs.mmio_writel = gen4_write32;
dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
dev_priv->uncore.funcs.mmio_readb = gen4_read8;
dev_priv->uncore.funcs.mmio_readw = gen4_read16;
dev_priv->uncore.funcs.mmio_readl = gen4_read32;
dev_priv->uncore.funcs.mmio_readq = gen4_read64;
ASSIGN_WRITE_MMIO_VFUNCS(gen4);
ASSIGN_READ_MMIO_VFUNCS(gen4);
break;
}
 
i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS
 
void intel_uncore_fini(struct drm_device *dev)
{
968,7 → 1264,7
/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
uint32_t gen_bitmask;
} whitelist[] = {
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};
 
int i915_reg_read_ioctl(struct drm_device *dev,
1044,41 → 1340,34
return 0;
}
 
static int i965_reset_complete(struct drm_device *dev)
static int i915_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
 
static int i965_do_reset(struct drm_device *dev)
static int i915_do_reset(struct drm_device *dev)
{
int ret;
/* assert reset for at least 20 usec */
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
udelay(20);
pci_write_config_byte(dev->pdev, I915_GDRST, 0);
 
/* FIXME: i965g/gm need a display save/restore for gpu reset. */
return -ENODEV;
return wait_for(i915_reset_complete(dev), 500);
}
 
/*
* Set the domains we want to reset (GRDOM/bits 2 and 3) as
* well as the reset bit (GR/bit 0). Setting the GR bit
* triggers the reset; when done, the hardware will clear it.
*/
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
static int g4x_reset_complete(struct drm_device *dev)
{
u8 gdrst;
pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
 
pci_write_config_byte(dev->pdev, I965_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 
ret = wait_for(i965_reset_complete(dev), 500);
if (ret)
return ret;
 
pci_write_config_byte(dev->pdev, I965_GDRST, 0);
 
return 0;
static int g33_do_reset(struct drm_device *dev)
{
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(dev), 500);
}
 
static int g4x_do_reset(struct drm_device *dev)
1086,9 → 1375,9
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
pci_write_config_byte(dev->pdev, I965_GDRST,
pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
 
1096,9 → 1385,9
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
 
pci_write_config_byte(dev->pdev, I965_GDRST,
pci_write_config_byte(dev->pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for(i965_reset_complete(dev), 500);
ret = wait_for(g4x_reset_complete(dev), 500);
if (ret)
return ret;
 
1106,7 → 1395,7
I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
POSTING_READ(VDECCLK_GATE_D);
 
pci_write_config_byte(dev->pdev, I965_GDRST, 0);
pci_write_config_byte(dev->pdev, I915_GDRST, 0);
 
return 0;
}
1164,8 → 1453,10
return ironlake_do_reset(dev);
else if (IS_G4X(dev))
return g4x_do_reset(dev);
else if (IS_GEN4(dev))
return i965_do_reset(dev);
else if (IS_G33(dev))
return g33_do_reset(dev);
else if (INTEL_INFO(dev)->gen >= 3)
return i915_do_reset(dev);
else
return -ENODEV;
}
/drivers/video/drm/i915/kms_display.c
5,73 → 5,25
#include <uapi/drm/drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
 
#include <syscall.h>
 
#include "bitmap.h"
#include <display.h>
 
typedef struct
{
kobj_t header;
 
uint32_t *data;
uint32_t hot_x;
uint32_t hot_y;
 
struct list_head list;
struct drm_i915_gem_object *cobj;
}cursor_t;
 
#define KMS_CURSOR_WIDTH 64
#define KMS_CURSOR_HEIGHT 64
 
 
struct tag_display
{
int x;
int y;
int width;
int height;
int bpp;
int vrefresh;
int pitch;
int lfb;
 
int supported_modes;
struct drm_device *ddev;
struct drm_connector *connector;
struct drm_crtc *crtc;
 
struct list_head cursors;
 
cursor_t *cursor;
int (*init_cursor)(cursor_t*);
cursor_t* (__stdcall *select_cursor)(cursor_t*);
void (*show_cursor)(int show);
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
};
 
 
static display_t *os_display;
display_t *os_display;
struct drm_i915_gem_object *main_fb_obj;
 
u32_t cmd_buffer;
u32_t cmd_offset;
u32 cmd_buffer;
u32 cmd_offset;
 
void init_render();
int sna_init();
 
int init_cursor(cursor_t *cursor);
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y);
 
330,7 → 282,7
struct drm_framebuffer *fb;
 
cursor_t *cursor;
u32_t ifl;
u32 ifl;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
484,12 → 436,13
 
void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
{
struct drm_i915_gem_object *obj = cursor->cobj;
list_del(&cursor->list);
 
i915_gem_object_ggtt_unpin(cursor->cobj);
 
mutex_lock(&main_device->struct_mutex);
drm_gem_object_unreference(&cursor->cobj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&main_device->struct_mutex);
 
__DestroyObject(cursor);
645,15 → 598,6
 
#define CURRENT_TASK (0x80003000)
 
static u32_t get_display_map()
{
u32_t addr;
 
addr = (u32_t)os_display;
addr+= sizeof(display_t); /* shoot me */
return *(u32_t*)addr;
}
 
void FASTCALL GetWindowRect(rect_t *rc)__asm__("GetWindowRect");
 
int i915_mask_update(struct drm_device *dev, void *data,
719,12 → 663,12
 
// slot = 0x01;
 
src_offset = os_display->win_map;
src_offset+= winrc.top*os_display->width + winrc.left;
 
src_offset = (u8*)( winrc.top*os_display->width + winrc.left);
src_offset+= get_display_map();
dst_offset = (u8*)mask->bo_map;
 
u32_t tmp_h = mask->height;
u32 tmp_h = mask->height;
 
ifl = safe_cli();
{
926,11 → 870,11
 
i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true);
 
src_offset = (u8*)( mt*os_display->width + ml);
src_offset+= get_display_map();
src_offset = os_display->win_map;
src_offset+= mt*os_display->width + ml;
dst_offset = (u8*)mask->bo_map;
 
u32_t tmp_h = mask->height;
u32 tmp_h = mask->height;
 
ifl = safe_cli();
{
1145,21 → 1089,5
return 1;
}
 
unsigned int hweight16(unsigned int w)
{
unsigned int res = w - ((w >> 1) & 0x5555);
res = (res & 0x3333) + ((res >> 2) & 0x3333);
res = (res + (res >> 4)) & 0x0F0F;
return (res + (res >> 8)) & 0x00FF;
}
 
 
unsigned long round_jiffies_up_relative(unsigned long j)
{
unsigned long j0 = GetTimerTicks();
 
/* Use j0 because jiffies might change while we run */
return round_jiffies_common(j + j0, true) - j0;
}
 
 
/drivers/video/drm/i915/main.c
5,7 → 5,6
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <syscall.h>
 
33,7 → 32,8
struct drm_file *drm_file_handlers[256];
videomode_t usermode;
 
void cpu_detect();
void cpu_detect1();
int kmap_init();
 
int _stdcall display_handler(ioctl_t *io);
int init_agp(void);
170,7 → 170,7
asm volatile ("int $0x40"::"a"(-1));
}
 
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
static pci_dev_t device;
const struct pci_device_id *ent;
186,7 → 186,7
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("\ni915 v3.17-rc5 build %s %s\nusage: i915 [options]\n"
printf("\ni915 v3.19-rc2 build %s %s\nusage: i915 [options]\n"
"-pm=<0,1> Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
__DATE__, __TIME__);
printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
210,7 → 210,7
return 0;
}
 
cpu_detect();
cpu_detect1();
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
 
err = enum_pci_devices();
220,6 → 220,13
return 0;
}
 
err = kmap_init();
if( unlikely(err != 0) )
{
dbgprintf("kmap initialization failed\n");
return 0;
}
 
dmi_scan_machine();
 
driver_wq_state = I915_DEV_INIT;
310,8 → 317,8
struct drm_file *file;
 
int retval = -1;
u32_t *inp;
u32_t *outp;
u32 *inp;
u32 *outp;
 
inp = io->input;
outp = io->output;
465,10 → 472,10
#define PCI_CLASS_BRIDGE_HOST 0x0600
#define PCI_CLASS_BRIDGE_ISA 0x0601
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
int pci_scan_filter(u32 id, u32 busnr, u32 devfn)
{
u16_t vendor, device;
u32_t class;
u16 vendor, device;
u32 class;
int ret = 0;
 
vendor = id & 0xffff;
488,43 → 495,17
};
 
 
 
 
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
asm volatile("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
: "0" (*eax), "2" (*ecx)
: "memory");
}
 
 
 
static inline void cpuid(unsigned int op,
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx)
{
*eax = op;
*ecx = 0;
__cpuid(eax, ebx, ecx, edx);
}
 
struct mtrr
{
u64_t base;
u64_t mask;
u64 base;
u64 mask;
};
 
struct cpuinfo
{
u64_t caps;
u64_t def_mtrr;
u64_t mtrr_cap;
u64 caps;
u64 def_mtrr;
u64 mtrr_cap;
int var_mtrr_count;
int fix_mtrr_count;
struct mtrr var_mtrr[9];
549,13 → 530,13
#define MTRR_WC 1
#define MTRR_WB 6
 
static inline u64_t read_msr(u32_t msr)
static inline u64 read_msr(u32 msr)
{
union {
u64_t val;
u64 val;
struct {
u32_t low;
u32_t high;
u32 low;
u32 high;
};
}tmp;
 
566,13 → 547,13
return tmp.val;
}
 
static inline void write_msr(u32_t msr, u64_t val)
static inline void write_msr(u32 msr, u64 val)
{
union {
u64_t val;
u64 val;
struct {
u32_t low;
u32_t high;
u32 low;
u32 high;
};
}tmp;
 
583,24 → 564,6
:: "a" (tmp.low), "d" (tmp.high), "c" (msr));
}
 
#define rdmsr(msr, low, high) \
do { \
u64 __val = read_msr((msr)); \
(void)((low) = (u32)__val); \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
 
static inline void native_write_msr(unsigned int msr,
unsigned low, unsigned high)
{
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}
 
static inline void wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
 
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
 
static void set_mtrr(unsigned int reg, unsigned long base,
630,53 → 593,16
};
}
 
static unsigned long __force_order;
 
static inline unsigned long read_cr0(void)
{
unsigned long val;
asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr0(unsigned long val)
{
asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}
 
static inline unsigned long read_cr4(void)
{
unsigned long val;
asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr4(unsigned long val)
{
asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}
 
static inline unsigned long read_cr3(void)
{
unsigned long val;
asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
return val;
}
 
static inline void write_cr3(unsigned long val)
{
asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}
 
static u32 deftype_lo, deftype_hi;
 
void cpu_detect()
void cpu_detect1()
{
struct cpuinfo cpuinfo;
 
u32 junk, tfms, cap0, misc;
int i;
#if 0
 
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 
if (cap0 & (1<<19))
684,6 → 610,7
x86_clflush_size = ((misc >> 8) & 0xff) * 8;
}
 
#if 0
cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
(unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
955,4 → 882,3
return __res;
}
 
 
/drivers/video/drm/i915/pci.c
2,11 → 2,11
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <pci.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);
 
static LIST_HEAD(devices);
 
31,9 → 31,9
}
 
 
static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
u32_t size = mask & maxbase; /* Find the significant bits */
u32 size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
50,9 → 50,9
return size;
}
 
static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
u64_t size = mask & maxbase; /* Find the significant bits */
u64 size = mask & maxbase; /* Find the significant bits */
 
if (!size)
return 0;
69,7 → 69,7
return size;
}
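/*
 * Illustrative worked example, not part of this revision: classic BAR sizing
 * as used by pci_read_bases() below.  All numbers are hypothetical.  Writing
 * ~0 to a memory BAR and reading it back leaves only the address bits the
 * device actually decodes:
 *
 *      l    (value before the probe)  : 0xF0000000
 *      sz   (read back after ~0)      : 0xFFF00000
 *      mask & sz                      : 0xFFF00000, lowest set bit 0x00100000,
 *                                       i.e. a 1 MiB window; pci_size()'s
 *                                       result then becomes the resource
 *                                       extent (res->end) further down.
 */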
 
static inline int is_64bit_memory(u32_t mask)
static inline int is_64bit_memory(u32 mask)
{
if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
(PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
79,15 → 79,15
 
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
u32_t pos, reg, next;
u32_t l, sz;
u32 pos, reg, next;
u32 l, sz;
struct resource *res;
 
for(pos=0; pos < howmany; pos = next)
{
u64_t l64;
u64_t sz64;
u32_t raw_sz;
u64 l64;
u64 sz64;
u32 raw_sz;
 
next = pos + 1;
 
109,7 → 109,7
if ((l & PCI_BASE_ADDRESS_SPACE) ==
PCI_BASE_ADDRESS_SPACE_MEMORY)
{
sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
/*
* For 64bit prefetchable memory sz could be 0, if the
* real size is bigger than 4G, so we need to check
131,14 → 131,14
res->flags |= pci_calc_resource_flags(l);
if (is_64bit_memory(l))
{
u32_t szhi, lhi;
u32 szhi, lhi;
 
lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
sz64 = ((u64_t)szhi << 32) | raw_sz;
l64 = ((u64_t)lhi << 32) | l;
sz64 = ((u64)szhi << 32) | raw_sz;
l64 = ((u64)lhi << 32) | l;
sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
next++;
 
162,7 → 162,7
{
/* 64-bit wide address, treat as disabled */
PciWrite32(dev->busnr, dev->devfn, reg,
l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
res->start = 0;
res->end = sz;
186,7 → 186,7
 
if (sz && sz != 0xffffffff)
{
sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);
sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
 
if (sz)
{
202,7 → 202,7
 
static void pci_read_irq(struct pci_dev *dev)
{
u8_t irq;
u8 irq;
 
irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
dev->pin = irq;
214,7 → 214,7
 
int pci_setup_device(struct pci_dev *dev)
{
u32_t class;
u32 class;
 
class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
dev->revision = class & 0xff;
246,7 → 246,7
*/
if (class == PCI_CLASS_STORAGE_IDE)
{
u8_t progif;
u8 progif;
 
progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
if ((progif & 1) == 0)
311,12 → 311,12
return 0;
};
 
static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
{
pci_dev_t *dev;
 
u32_t id;
u8_t hdr;
u32 id;
u8 hdr;
 
int timeout = 10;
 
372,7 → 372,7
 
 
 
int pci_scan_slot(u32_t bus, int devfn)
int pci_scan_slot(u32 bus, int devfn)
{
int func, nr = 0;
 
480,8 → 480,8
int enum_pci_devices()
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
u32 last_bus;
u32 bus = 0 , devfn = 0;
 
 
last_bus = PciApi(1);
664,11 → 664,6
}
 
 
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
 
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
/drivers/video/drm/i915/utils.c
12,7 → 12,7
struct file *filep;
int count;
 
filep = malloc(sizeof(*filep));
filep = __builtin_malloc(sizeof(*filep));
 
if(unlikely(filep == NULL))
return ERR_PTR(-ENOMEM);
248,7 → 248,6
}
 
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
478,35 → 477,302
}
 
 
#define KMAP_MAX 256
 
static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;
 
 
int kmap_init()
{
kmap_base = AllocKernelSpace(KMAP_MAX*4096);
if(kmap_base == NULL)
return -1;
 
kmap_av = KMAP_MAX;
MutexInit(&kmap_mutex);
return 0;
};
 
void *kmap(struct page *page)
{
void *vaddr = NULL;
int i;
 
do
{
MutexLock(&kmap_mutex);
if(kmap_av != 0)
{
for(i = kmap_first; i < KMAP_MAX; i++)
{
if(kmap_table[i] == NULL)
{
kmap_av--;
kmap_first = i;
kmap_table[i] = page;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,(addr_t)page,3);
break;
};
};
};
MutexUnlock(&kmap_mutex);
}while(vaddr == NULL);
 
return vaddr;
};
 
void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));
 
void kunmap(struct page *page)
{
void *vaddr;
int i;
 
vaddr = (void*)MapIoMem(page_to_phys(page), 4096, PG_SW);
MutexLock(&kmap_mutex);
 
return vaddr;
for(i = 0; i < KMAP_MAX; i++)
{
if(kmap_table[i] == page)
{
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
vaddr = kmap_base + (i<<12);
MapPage(vaddr,0,0);
break;
};
};
 
MutexUnlock(&kmap_mutex);
};
 
void kunmap_atomic(void *vaddr)
{
int i;
 
MapPage(vaddr,0,0);
 
i = (vaddr - kmap_base) >> 12;
 
MutexLock(&kmap_mutex);
 
kmap_av++;
if(i < kmap_first)
kmap_first = i;
kmap_table[i] = NULL;
 
MutexUnlock(&kmap_mutex);
}
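/*
 * Illustrative usage sketch, not part of this revision: the slot table above
 * gives the driver a classic highmem-style map/copy/unmap pattern.  The
 * helper below is hypothetical.
 */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
        void *vaddr = kmap(page);   /* grabs a free slot, may spin until one is free */

        memcpy(dst, vaddr, len);

        kunmap(page);               /* clears the slot and unmaps the address */
}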
 
unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
size_t strlcat(char *dest, const char *src, size_t count)
{
const unsigned long *p = addr;
unsigned long result = 0;
unsigned long tmp;
size_t dsize = strlen(dest);
size_t len = strlen(src);
size_t res = dsize + len;
 
while (size & ~(BITS_PER_LONG-1)) {
if (~(tmp = *(p++)))
goto found;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
/* This would be a bug */
BUG_ON(dsize >= count);
 
dest += dsize;
count -= dsize;
if (len >= count)
len = count-1;
memcpy(dest, src, len);
dest[len] = 0;
return res;
}
if (!size)
return result;
EXPORT_SYMBOL(strlcat);
 
tmp = (*p) | (~0UL << size);
if (tmp == ~0UL) /* Are any bits zero? */
return result + size; /* Nope. */
found:
return result + ffz(tmp);
void msleep(unsigned int msecs)
{
msecs /= 10;
if(!msecs) msecs = 1;
 
__asm__ __volatile__ (
"call *__imp__Delay"
::"b" (msecs));
__asm__ __volatile__ (
"":::"ebx");
 
};
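/*
 * Illustrative note, not part of this revision: the imported Delay() service
 * appears to take 10 ms system ticks, so msleep(25) becomes Delay(2) and any
 * request shorter than 10 ms is clamped to a single tick; sleeps are
 * therefore only tick-accurate here.
 */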
 
 
/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
asm volatile(
" test %0,%0 \n"
" jz 3f \n"
" jmp 1f \n"
 
".align 16 \n"
"1: jmp 2f \n"
 
".align 16 \n"
"2: dec %0 \n"
" jnz 2b \n"
"3: dec %0 \n"
 
: /* we don't need output */
:"a" (loops)
);
}
 
 
static void (*delay_fn)(unsigned long) = delay_loop;
 
void __delay(unsigned long loops)
{
delay_fn(loops);
}
 
 
inline void __const_udelay(unsigned long xloops)
{
int d0;
 
xloops *= 4;
asm("mull %%edx"
: "=d" (xloops), "=&a" (d0)
: "1" (xloops), ""
(loops_per_jiffy * (HZ/4)));
 
__delay(++xloops);
}
 
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
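/*
 * Illustrative arithmetic, not part of this revision: 0x000010c7 is roughly
 * 2^32 / 10^6, so __udelay() turns microseconds into a 32-bit binary fraction
 * of a second.  __const_udelay() multiplies that by 4 * loops_per_jiffy *
 * (HZ/4) = loops per second and keeps the high 32 bits of the product (edx
 * after "mull %edx"), i.e. loops = usecs * loops_per_jiffy * HZ / 10^6.
 * With hypothetical values HZ = 100 and loops_per_jiffy = 1,000,000,
 * udelay(50) spins delay_loop() for about 50e-6 * 1e8 = 5000 iterations.
 */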
 
unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
w -= (w >> 1) & 0x55555555;
w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
w = (w + (w >> 4)) & 0x0f0f0f0f;
return (w * 0x01010101) >> 24;
#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);
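/*
 * Illustrative worked example, not part of this revision: the fallback branch
 * of _sw_hweight32() folds the population count in halving strides.  For
 * w = 0xF0F0F0F0 (16 bits set):
 *
 *      w - ((w >> 1) & 0x55555555)                     -> 0xA0A0A0A0  (2-bit sums)
 *      (res & 0x33333333) + ((res >> 2) & 0x33333333)  -> 0x40404040  (4-bit sums)
 *      (res + (res >> 4)) & 0x0F0F0F0F                 -> 0x04040404  (byte sums)
 *      res + (res >> 8)                                -> 0x04080808
 *      (res + (res >> 16)) & 0x000000FF                -> 16
 */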
 
 
void usleep_range(unsigned long min, unsigned long max)
{
udelay(max);
}
EXPORT_SYMBOL(usleep_range);
 
 
static unsigned long round_jiffies_common(unsigned long j, int cpu,
bool force_up)
{
int rem;
unsigned long original = j;
 
/*
* We don't want all cpus firing their timers at once hitting the
* same lock or cachelines, so we skew each extra cpu with an extra
* 3 jiffies. This 3 jiffies came originally from the mm/ code which
* already did this.
* The skew is done by adding 3*cpunr, then round, then subtract this
* extra offset again.
*/
j += cpu * 3;
 
rem = j % HZ;
 
/*
* If the target jiffie is just after a whole second (which can happen
* due to delays of the timer irq, long irq off times etc etc) then
* we should round down to the whole second, not up. Use 1/4th second
* as cutoff for this rounding as an extreme upper bound for this.
* But never round down if @force_up is set.
*/
if (rem < HZ/4 && !force_up) /* round down */
j = j - rem;
else /* round up */
j = j - rem + HZ;
 
/* now that we have rounded, subtract the extra skew again */
j -= cpu * 3;
 
/*
* Make sure j is still in the future. Otherwise return the
* unmodified value.
*/
return time_is_after_jiffies(j) ? j : original;
}
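/*
 * Illustrative worked example, not part of this revision, assuming HZ = 100
 * and cpu = 0: for j = 1230 the remainder is 30 (>= HZ/4), so the target is
 * rounded up to the next whole second, 1300; for j = 1210 the remainder is
 * 10 (< HZ/4), so it is rounded down to 1200 unless force_up is set, in
 * which case it still becomes 1300.
 */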
 
 
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
unsigned long j0 = jiffies;
 
/* Use j0 because jiffies might change while we run */
return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
 
 
#include <linux/rcupdate.h>
 
struct rcu_ctrlblk {
struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
struct rcu_head **curtail; /* ->next pointer of last CB. */
// RCU_TRACE(long qlen); /* Number of pending CBs. */
// RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
// RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
// RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
// RCU_TRACE(const char *name); /* Name of RCU type. */
};
 
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
.donetail = &rcu_sched_ctrlblk.rcucblist,
.curtail = &rcu_sched_ctrlblk.rcucblist,
// RCU_TRACE(.name = "rcu_sched")
};
 
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp)
{
unsigned long flags;
 
// debug_rcu_head_queue(head);
head->func = func;
head->next = NULL;
 
local_irq_save(flags);
*rcp->curtail = head;
rcp->curtail = &head->next;
// RCU_TRACE(rcp->qlen++);
local_irq_restore(flags);
}
 
/*
* Post an RCU callback to be invoked after the end of an RCU-sched grace
* period. But since we have but one CPU, that would be after any
* quiescent state.
*/
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
__call_rcu(head, func, &rcu_sched_ctrlblk);
}
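/*
 * Illustrative usage sketch, not part of this revision: with this tiny
 * single-CPU RCU the callback is simply queued on rcu_sched_ctrlblk and
 * invoked later when the list is drained.  The structure and helpers below
 * are hypothetical.
 */
struct example_obj {
        int             payload;
        struct rcu_head rcu;
};

static void example_obj_free(struct rcu_head *head)
{
        struct example_obj *obj = container_of(head, struct example_obj, rcu);

        kfree(obj);
}

static void example_obj_release(struct example_obj *obj)
{
        /* Defer the free until after a grace period has elapsed. */
        call_rcu_sched(&obj->rcu, example_obj_free);
}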
 
 
 
/drivers/video/drm/radeon/Makefile.lto
36,6 → 36,7
HFILES:= $(DRV_INCLUDES)/linux/types.h \
$(DRV_INCLUDES)/linux/list.h \
$(DRV_INCLUDES)/linux/pci.h \
$(DRV_INCLUDES)/drm/drm.h \
$(DRV_INCLUDES)/drm/drmP.h \
$(DRV_INCLUDES)/drm/drm_edid.h \
$(DRV_INCLUDES)/drm/drm_crtc.h \
59,7 → 60,6
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_helper.c \
$(DRM_TOPDIR)/drm_drv.c \
$(DRM_TOPDIR)/drm_atomic.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_gem.c \
129,7 → 129,6
radeon_ring.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_sync.c \
radeon_test.c \
radeon_ttm.c \
radeon_ucode.c \
146,6 → 145,7
rv740_dpm.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_shaders.c \
r600_cs.c \
r600_dma.c \
/drivers/video/drm/radeon/main.c
134,7 → 134,7
if( GetService("DISPLAY") != 0 )
return 0;
 
printf("Radeon v3.19-rc1 cmdline %s\n", cmdline);
printf("Radeon v3.19-rc2 cmdline %s\n", cmdline);
 
if( cmdline && *cmdline )
parse_cmdline(cmdline, &usermode, log, &radeon_modeset);
/drivers/video/drm/radeon/rdisplay.c
4,7 → 4,7
#include "radeon.h"
#include "radeon_object.h"
#include "bitmap.h"
#include "display.h"
#include <display.h>
 
#include "r100d.h"
 
32,20 → 32,20
rdev = (struct radeon_device *)os_display->ddev->dev_private;
 
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &cursor->robj);
4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, (struct radeon_bo**)&cursor->cobj);
 
if (unlikely(r != 0))
return r;
 
r = radeon_bo_reserve(cursor->robj, false);
r = radeon_bo_reserve(cursor->cobj, false);
if (unlikely(r != 0))
return r;
 
r = radeon_bo_pin(cursor->robj, RADEON_GEM_DOMAIN_VRAM, NULL);
r = radeon_bo_pin(cursor->cobj, RADEON_GEM_DOMAIN_VRAM, NULL);
if (unlikely(r != 0))
return r;
 
r = radeon_bo_kmap(cursor->robj, (void**)&bits);
r = radeon_bo_kmap(cursor->cobj, (void**)&bits);
if (r) {
DRM_ERROR("radeon: failed to map cursor (%d).\n", r);
return r;
63,7 → 63,7
for(i = 0; i < CURSOR_WIDTH*(CURSOR_HEIGHT-32); i++)
*bits++ = 0;
 
radeon_bo_kunmap(cursor->robj);
radeon_bo_kunmap(cursor->cobj);
 
// cursor->header.destroy = destroy_cursor;
 
73,7 → 73,7
void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
{
list_del(&cursor->list);
radeon_bo_unpin(cursor->robj);
radeon_bo_unpin(cursor->cobj);
KernelFree(cursor->data);
__DestroyObject(cursor);
};
110,7 → 110,7
old = os_display->cursor;
 
os_display->cursor = cursor;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
gpu_addr = radeon_bo_gpu_offset(cursor->cobj);
 
if (ASIC_IS_DCE4(rdev))
{
207,7 → 207,7
WREG32(RADEON_CUR_HORZ_VERT_POSN,
(RADEON_CUR_LOCK | (x << 16) | y));
 
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
gpu_addr = radeon_bo_gpu_offset(cursor->cobj);
 
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,
/drivers/video/drm/radeon/rdisplay_kms.c
6,7 → 6,7
#include "drm_fb_helper.h"
#include "hmm.h"
#include "bitmap.h"
#include "display.h"
#include <display.h>
 
extern struct drm_framebuffer *main_fb;
extern struct drm_gem_object *main_fb_obj;
94,7 → 94,7
old = os_display->cursor;
 
os_display->cursor = cursor;
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
gpu_addr = radeon_bo_gpu_offset(cursor->cobj);
 
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
169,7 → 169,7
WREG32(RADEON_CUR_HORZ_VERT_POSN,
(RADEON_CUR_LOCK | (x << 16) | y));
 
gpu_addr = radeon_bo_gpu_offset(cursor->robj);
gpu_addr = radeon_bo_gpu_offset(cursor->cobj);
 
/* offset is from DISP(2)_BASE_ADDRESS */
WREG32(RADEON_CUR_OFFSET,