35,11 → 35,15 |
#include "i915_reg.h" |
#include "intel_bios.h" |
#include "intel_ringbuffer.h" |
#include "intel_lrc.h" |
#include "i915_gem_gtt.h" |
#include "i915_gem_render_state.h" |
//#include <linux/io-mapping.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include <drm/intel-gtt.h> |
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */ |
#include <drm/drm_gem.h> |
//#include <linux/backlight.h> |
#include <linux/hashtable.h> |
|
50,22 → 54,13 |
/* General customization: |
*/ |
|
#define I915_TILING_NONE 0 |
|
#define VGA_RSRC_NONE 0x00 |
#define VGA_RSRC_LEGACY_IO 0x01 |
#define VGA_RSRC_LEGACY_MEM 0x02 |
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM) |
/* Non-legacy access */ |
#define VGA_RSRC_NORMAL_IO 0x04 |
#define VGA_RSRC_NORMAL_MEM 0x08 |
|
#define DRIVER_AUTHOR "Tungsten Graphics, Inc." |
|
#define DRIVER_NAME "i915" |
#define DRIVER_DESC "Intel Graphics" |
#define DRIVER_DATE "20140725" |
#define DRIVER_DATE "20141121" |
|
#undef WARN_ON |
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")") |
|
enum pipe { |
INVALID_PIPE = -1, |
PIPE_A = 0, |
85,6 → 80,14 |
}; |
#define transcoder_name(t) ((t) + 'A') |
|
/* |
* This is the maximum (across all platforms) number of planes (primary + |
* sprites) that can be active at the same time on one pipe. |
* |
* This value doesn't count the cursor plane. |
*/ |
#define I915_MAX_PLANES 3 |
|
enum plane { |
PLANE_A = 0, |
PLANE_B, |
173,7 → 176,10 |
I915_GEM_DOMAIN_INSTRUCTION | \ |
I915_GEM_DOMAIN_VERTEX) |
|
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) |
#define for_each_pipe(__dev_priv, __p) \ |
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) |
#define for_each_plane(pipe, p) \ |
for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) |
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) |
|
#define for_each_crtc(dev, crtc) \ |
182,6 → 188,11 |
#define for_each_intel_crtc(dev, intel_crtc) \ |
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) |
|
#define for_each_intel_encoder(dev, intel_encoder) \ |
list_for_each_entry(intel_encoder, \ |
&(dev)->mode_config.encoder_list, \ |
base.head) |
|
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ |
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ |
if ((intel_encoder)->base.crtc == (__crtc)) |
203,27 → 214,52 |
/* real shared dpll ids must be >= 0 */ |
DPLL_ID_PCH_PLL_A = 0, |
DPLL_ID_PCH_PLL_B = 1, |
/* hsw/bdw */ |
DPLL_ID_WRPLL1 = 0, |
DPLL_ID_WRPLL2 = 1, |
/* skl */ |
DPLL_ID_SKL_DPLL1 = 0, |
DPLL_ID_SKL_DPLL2 = 1, |
DPLL_ID_SKL_DPLL3 = 2, |
}; |
#define I915_NUM_PLLS 2 |
#define I915_NUM_PLLS 3 |
|
/* Raw register state needed to program (and compare) a shared DPLL. */
struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for this DPLL. We store those in the
	 * lower part of ctrl1 and they get shifted into position when
	 * writing the register. This allows us to easily compare the
	 * state to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;
};
|
/* Software state for a shared DPLL: which CRTCs use it and what to program. */
struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state; /* register values for this PLL */
};
|
struct intel_shared_dpll { |
int refcount; /* count of number of CRTCs sharing this PLL */ |
struct intel_shared_dpll_config config; |
struct intel_shared_dpll_config *new_config; |
|
int active; /* count of number of active CRTCs (i.e. DPMS on) */ |
bool on; /* is the PLL actually active? Disabled during modeset */ |
const char *name; |
/* should match the index in the dev_priv->shared_dplls array */ |
enum intel_dpll_id id; |
struct intel_dpll_hw_state hw_state; |
/* The mode_set hook is optional and should be used together with the |
* intel_prepare_shared_dpll function. */ |
void (*mode_set)(struct drm_i915_private *dev_priv, |
237,6 → 273,11 |
struct intel_dpll_hw_state *hw_state); |
}; |
|
#define SKL_DPLL0 0 |
#define SKL_DPLL1 1 |
#define SKL_DPLL2 2 |
#define SKL_DPLL3 3 |
|
/* Used by dp and fdi links */ |
struct intel_link_m_n { |
uint32_t tu; |
265,7 → 306,6 |
#define DRIVER_PATCHLEVEL 0 |
|
#define WATCH_LISTS 0 |
#define WATCH_GTT 0 |
|
struct opregion_header; |
struct opregion_acpi; |
288,10 → 328,6 |
struct intel_overlay; |
struct intel_overlay_error_state; |
|
struct drm_i915_master_private { |
drm_local_map_t *sarea; |
struct _drm_i915_sarea *sarea_priv; |
}; |
#define I915_FENCE_REG_NONE -1 |
#define I915_MAX_NUM_FENCES 32 |
/* 32 fences + sign bit for FENCE_REG_NONE */ |
398,6 → 434,7 |
pid_t pid; |
char comm[TASK_COMM_LEN]; |
} ring[I915_NUM_RINGS]; |
|
struct drm_i915_error_buffer { |
u32 size; |
u32 name; |
416,9 → 453,11 |
} **active_bo, **pinned_bo; |
|
u32 *active_bo_count, *pinned_bo_count; |
u32 vm_count; |
}; |
|
struct intel_connector; |
struct intel_encoder; |
struct intel_crtc_config; |
struct intel_plane_config; |
struct intel_crtc; |
445,7 → 484,7 |
* Returns true on success, false on failure. |
*/ |
bool (*find_dpll)(const struct intel_limit *limit, |
struct drm_crtc *crtc, |
struct intel_crtc *crtc, |
int target, int refclk, |
struct dpll *match_clock, |
struct dpll *best_clock); |
461,15 → 500,14 |
struct intel_crtc_config *); |
void (*get_plane_config)(struct intel_crtc *, |
struct intel_plane_config *); |
int (*crtc_mode_set)(struct drm_crtc *crtc, |
int x, int y, |
struct drm_framebuffer *old_fb); |
int (*crtc_compute_clock)(struct intel_crtc *crtc); |
void (*crtc_enable)(struct drm_crtc *crtc); |
void (*crtc_disable)(struct drm_crtc *crtc); |
void (*off)(struct drm_crtc *crtc); |
void (*write_eld)(struct drm_connector *connector, |
struct drm_crtc *crtc, |
void (*audio_codec_enable)(struct drm_connector *connector, |
struct intel_encoder *encoder, |
struct drm_display_mode *mode); |
void (*audio_codec_disable)(struct intel_encoder *encoder); |
void (*fdi_link_train)(struct drm_crtc *crtc); |
void (*init_clock_gating)(struct drm_device *dev); |
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
487,7 → 525,7 |
/* display clock increase/decrease */ |
/* pll clock increase/decrease */ |
|
int (*setup_backlight)(struct intel_connector *connector); |
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe); |
uint32_t (*get_backlight)(struct intel_connector *connector); |
void (*set_backlight)(struct intel_connector *connector, |
uint32_t level); |
526,6 → 564,7 |
|
unsigned fw_rendercount; |
unsigned fw_mediacount; |
unsigned fw_blittercount; |
|
struct timer_list force_wake_timer; |
}; |
544,6 → 583,7 |
func(is_ivybridge) sep \ |
func(is_valleyview) sep \ |
func(is_haswell) sep \ |
func(is_skylake) sep \ |
func(is_preliminary) sep \ |
func(has_fbc) sep \ |
func(has_pipe_cxsr) sep \ |
561,6 → 601,7 |
|
struct intel_device_info { |
u32 display_mmio_offset; |
u16 device_id; |
u8 num_pipes:3; |
u8 num_sprites[I915_MAX_PIPES]; |
u8 gen; |
625,13 → 666,22 |
uint8_t remap_slice; |
struct drm_i915_file_private *file_priv; |
struct i915_ctx_hang_stats hang_stats; |
struct i915_address_space *vm; |
struct i915_hw_ppgtt *ppgtt; |
|
/* Legacy ring buffer submission */ |
struct { |
struct drm_i915_gem_object *rcs_state; |
bool initialized; |
} legacy_hw_ctx; |
|
/* Execlists */ |
bool rcs_initialized; |
struct { |
struct drm_i915_gem_object *state; |
struct intel_ringbuffer *ringbuf; |
int unpin_count; |
} engine[I915_NUM_RINGS]; |
|
struct list_head link; |
}; |
|
645,6 → 695,20 |
struct drm_mm_node compressed_fb; |
struct drm_mm_node *compressed_llb; |
|
bool false_color; |
|
/* Tracks whether the HW is actually enabled, not whether the feature is |
* possible. */ |
bool enabled; |
|
	/* On gen8 some rings cannot perform the FBC clean operation, so
	 * for now we do it in software via MMIO.
	 * This variable works in the opposite information direction of
	 * ring->fbc_dirty: it tells the frontbuffer-tracking software
	 * to perform the cache clean on the SW side.
	 */
bool need_sw_cache_clean; |
|
struct intel_fbc_work { |
struct delayed_work work; |
struct drm_crtc *crtc; |
686,6 → 750,7 |
PCH_IBX, /* Ibexpeak PCH */ |
PCH_CPT, /* Cougarpoint PCH */ |
PCH_LPT, /* Lynxpoint PCH */ |
PCH_SPT, /* Sunrisepoint PCH */ |
PCH_NOP, |
}; |
|
698,6 → 763,8 |
#define QUIRK_LVDS_SSC_DISABLE (1<<1) |
#define QUIRK_INVERT_BRIGHTNESS (1<<2) |
#define QUIRK_BACKLIGHT_PRESENT (1<<3) |
#define QUIRK_PIPEB_FORCE (1<<4) |
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5) |
|
struct intel_fbdev; |
struct intel_fbc_work; |
749,7 → 816,6 |
u32 saveBLC_HIST_CTL; |
u32 saveBLC_PWM_CTL; |
u32 saveBLC_PWM_CTL2; |
u32 saveBLC_HIST_CTL_B; |
u32 saveBLC_CPU_PWM_CTL; |
u32 saveBLC_CPU_PWM_CTL2; |
u32 saveFPB0; |
858,6 → 924,7 |
u32 savePIPEB_LINK_N1; |
u32 saveMCHBAR_RENDER_STANDBY; |
u32 savePCH_PORT_HOTPLUG; |
u16 saveGCDGMBUS; |
}; |
|
struct vlv_s0ix_state { |
928,8 → 995,12 |
}; |
|
struct intel_gen6_power_mgmt { |
/* work and pm_iir are protected by dev_priv->irq_lock */ |
/* |
* work, interrupts_enabled and pm_iir are protected by |
* dev_priv->irq_lock |
*/ |
struct work_struct work; |
bool interrupts_enabled; |
u32 pm_iir; |
|
/* Frequencies are stored in potentially platform dependent multiples. |
1052,31 → 1123,6 |
struct i915_power_well *power_wells; |
}; |
|
struct i915_dri1_state { |
unsigned allow_batchbuffer : 1; |
u32 __iomem *gfx_hws_cpu_addr; |
|
unsigned int cpp; |
int back_offset; |
int front_offset; |
int current_page; |
int page_flipping; |
|
uint32_t counter; |
}; |
|
struct i915_ums_state { |
/** |
* Flag if the X Server, and thus DRM, is not currently in |
* control of the device. |
* |
* This is set between LeaveVT and EnterVT. It needs to be |
* replaced with a semaphore. It also needs to be |
* transitioned away from for kernel modesetting. |
*/ |
int mm_suspended; |
}; |
|
#define MAX_L3_SLICES 2 |
struct intel_l3_parity { |
u32 *remap_info[MAX_L3_SLICES]; |
1153,6 → 1199,7 |
}; |
|
struct drm_i915_error_state_buf { |
struct drm_i915_private *i915; |
unsigned bytes; |
unsigned size; |
int err; |
1225,6 → 1272,9 |
|
/* For missed irq/seqno simulation. */ |
unsigned int test_irq_rings; |
|
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ |
bool reload_in_reset; |
}; |
|
enum modeset_restore { |
1234,6 → 1284,12 |
}; |
|
struct ddi_vbt_port_info { |
/* |
* This is an index in the HDMI/DVI DDI buffer translation table. |
* The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't |
* populate this field. |
*/ |
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff |
uint8_t hdmi_level_shift; |
|
uint8_t supports_dvi:1; |
1324,6 → 1380,49 |
enum intel_ddb_partitioning partitioning; |
}; |
|
/*
 * A contiguous range of display data buffer (DDB) blocks.
 * 'start' is inclusive, 'end' is exclusive, both in number of blocks.
 */
struct skl_ddb_entry {
	uint16_t start; /* first block of the range (inclusive) */
	uint16_t end;   /* one past the last block (exclusive) */
};

/* Number of DDB blocks covered by @entry. */
static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

/* Two DDB entries are equal iff both endpoints match. */
static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	return e1->start == e2->start && e1->end == e2->end;
}
|
/* Per-pipe, per-plane (and per-cursor) carve-up of the display data buffer. */
struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};
|
/*
 * Computed SKL watermark register values, together with the DDB
 * allocation they were derived from. 'dirty' marks pipes whose
 * values need to be written out.
 */
struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; /* one per WM level */
	uint32_t cursor[I915_MAX_PIPES][8]; /* one per WM level */
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};
|
/*
 * Per-plane (plus cursor) enable state and results for one watermark
 * level. NOTE(review): the _res_b/_res_l suffixes presumably mean
 * blocks/lines — confirm against the SKL watermark code.
 */
struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};
|
/* |
* This struct helps tracking the state needed for runtime PM, which puts the |
* device in PCI D3 state. Notice that when this happens, nothing on the |
1336,7 → 1435,7 |
* |
* Our driver uses the autosuspend delay feature, which means we'll only really |
* suspend if we stay with zero refcount for a certain amount of time. The |
* default value is currently very conservative (see intel_init_runtime_pm), but |
* default value is currently very conservative (see intel_runtime_pm_enable), but |
* it can be changed with the standard runtime PM files from sysfs. |
* |
* The irqs_disabled variable becomes true exactly after we disable the IRQs and |
1349,7 → 1448,7 |
*/ |
struct i915_runtime_pm {
	bool suspended;    /* device is runtime-suspended (PCI D3) */
	bool irqs_enabled; /* driver interrupts are currently enabled */
};
|
enum intel_pipe_crc_source { |
1393,6 → 1492,20 |
unsigned flip_bits; |
}; |
|
/* One workaround (WA) register entry: value to apply at a register offset. */
struct i915_wa_reg {
	u32 addr;  /* register offset */
	u32 value; /* value to write */
	/* bitmask representing WA bits */
	u32 mask;
};
|
#define I915_MAX_WA_REGS 16 |
|
/* Table of workaround register writes; 'count' valid entries in 'reg'. */
struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};
|
struct drm_i915_private { |
struct drm_device *dev; |
|
1426,7 → 1539,7 |
struct drm_i915_gem_object *semaphore_obj; |
uint32_t last_seqno, next_seqno; |
|
drm_dma_handle_t *status_page_dmah; |
struct drm_dma_handle *status_page_dmah; |
struct resource mch_res; |
|
/* protects the irq masks */ |
1471,15 → 1584,20 |
struct intel_opregion opregion; |
struct intel_vbt_data vbt; |
|
bool preserve_bios_swizzle; |
|
/* overlay */ |
struct intel_overlay *overlay; |
|
/* backlight registers and fields in struct intel_panel */ |
spinlock_t backlight_lock; |
struct mutex backlight_lock; |
|
/* LVDS info */ |
bool no_aux_handshake; |
|
/* protects panel power sequencer state */ |
struct mutex pps_mutex; |
|
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
1486,6 → 1604,7 |
|
unsigned int fsb_freq, mem_freq, is_ddr3; |
unsigned int vlv_cdclk_freq; |
unsigned int hpll_freq; |
|
/** |
* wq - Driver workqueue for GEM. |
1531,6 → 1650,8 |
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; |
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; |
|
struct i915_workarounds workarounds; |
|
/* Reclocking support */ |
bool render_reclock_avail; |
bool lvds_downclock_avail; |
1566,14 → 1687,9 |
#ifdef CONFIG_DRM_I915_FBDEV |
/* list of fbdev register on this device */ |
struct intel_fbdev *fbdev; |
struct work_struct fbdev_suspend_work; |
#endif |
|
/* |
* The console may be contended at resume, but we don't |
* want it to block on it. |
*/ |
struct work_struct console_resume_work; |
|
struct drm_property *broadcast_rgb_property; |
struct drm_property *force_audio_property; |
|
1598,9 → 1714,25 |
uint16_t spr_latency[5]; |
/* cursor */ |
uint16_t cur_latency[5]; |
/* |
* Raw watermark memory latency values |
* for SKL for all 8 levels |
* in 1us units. |
*/ |
uint16_t skl_latency[8]; |
|
/* |
* The skl_wm_values structure is a bit too big for stack |
* allocation, so we keep the staging struct where we store |
* intermediate results here instead. |
*/ |
struct skl_wm_values skl_results; |
|
/* current hardware state */ |
union { |
struct ilk_wm_values hw; |
struct skl_wm_values skl_hw; |
}; |
} wm; |
|
struct i915_runtime_pm pm; |
1619,12 → 1751,22 |
*/ |
struct workqueue_struct *dp_wq; |
|
/* Old dri1 support infrastructure, beware the dragons ya fools entering |
* here! */ |
struct i915_dri1_state dri1; |
/* Old ums support infrastructure, same warning applies. */ |
struct i915_ums_state ums; |
uint32_t bios_vgacntr; |
|
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
struct { |
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, |
struct intel_engine_cs *ring, |
struct intel_context *ctx, |
struct drm_i915_gem_execbuffer2 *args, |
struct list_head *vmas, |
struct drm_i915_gem_object *batch_obj, |
u64 exec_start, u32 flags); |
int (*init_rings)(struct drm_device *dev); |
void (*cleanup_ring)(struct intel_engine_cs *ring); |
void (*stop_ring)(struct intel_engine_cs *ring); |
} gt; |
|
/* |
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch |
* will be rejected. Instead look for a better place. |
1766,17 → 1908,8 |
* Only honoured if hardware has relevant pte bit |
*/ |
unsigned long gt_ro:1; |
|
/* |
* Is the GPU currently using a fence to access this buffer, |
*/ |
unsigned int pending_fenced_gpu_access:1; |
unsigned int fenced_gpu_access:1; |
|
unsigned int cache_level:3; |
|
unsigned int has_aliasing_ppgtt_mapping:1; |
unsigned int has_global_gtt_mapping:1; |
unsigned int has_dma_mapping:1; |
|
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; |
1809,10 → 1942,10 |
unsigned long user_pin_count; |
struct drm_file *pin_filp; |
|
union { |
/** for phy allocated objects */ |
drm_dma_handle_t *phys_handle; |
struct drm_dma_handle *phys_handle; |
|
union { |
struct i915_gem_userptr { |
uintptr_t ptr; |
unsigned read_only :1; |
1976,51 → 2109,65 |
int count; |
}; |
|
#define INTEL_INFO(dev) (&to_i915(dev)->info) |
/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */ |
#define __I915__(p) ({ \ |
struct drm_i915_private *__p; \ |
if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \ |
__p = (struct drm_i915_private *)p; \ |
else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \ |
__p = to_i915((struct drm_device *)p); \ |
else \ |
BUILD_BUG(); \ |
__p; \ |
}) |
#define INTEL_INFO(p) (&__I915__(p)->info) |
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) |
|
#define IS_I830(dev) ((dev)->pdev->device == 0x3577) |
#define IS_845G(dev) ((dev)->pdev->device == 0x2562) |
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) |
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) |
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) |
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572) |
#define IS_I865G(dev) (INTEL_DEVID(dev) == 0x2572) |
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) |
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592) |
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772) |
#define IS_I915GM(dev) (INTEL_DEVID(dev) == 0x2592) |
#define IS_I945G(dev) (INTEL_DEVID(dev) == 0x2772) |
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) |
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) |
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) |
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42) |
#define IS_GM45(dev) (INTEL_DEVID(dev) == 0x2A42) |
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) |
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001) |
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011) |
#define IS_PINEVIEW_G(dev) (INTEL_DEVID(dev) == 0xa001) |
#define IS_PINEVIEW_M(dev) (INTEL_DEVID(dev) == 0xa011) |
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) |
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) |
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046) |
#define IS_IRONLAKE_M(dev) (INTEL_DEVID(dev) == 0x0046) |
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \ |
(dev)->pdev->device == 0x0152 || \ |
(dev)->pdev->device == 0x015a) |
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \ |
(dev)->pdev->device == 0x0106 || \ |
(dev)->pdev->device == 0x010A) |
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ |
INTEL_DEVID(dev) == 0x0152 || \ |
INTEL_DEVID(dev) == 0x015a) |
#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \ |
INTEL_DEVID(dev) == 0x0106 || \ |
INTEL_DEVID(dev) == 0x010A) |
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) |
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) |
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) |
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
((dev)->pdev->device & 0xFF00) == 0x0C00) |
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
(((dev)->pdev->device & 0xf) == 0x2 || \ |
((dev)->pdev->device & 0xf) == 0x6 || \ |
((dev)->pdev->device & 0xf) == 0xe)) |
((INTEL_DEVID(dev) & 0xf) == 0x2 || \ |
(INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
(INTEL_DEVID(dev) & 0xf) == 0xe)) |
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ |
((dev)->pdev->device & 0xFF00) == 0x0A00) |
#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) |
(INTEL_DEVID(dev) & 0xFF00) == 0x0A00) |
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ |
((dev)->pdev->device & 0x00F0) == 0x0020) |
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
/* ULX machines are also considered ULT. */ |
#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ |
(dev)->pdev->device == 0x0A1E) |
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ |
INTEL_DEVID(dev) == 0x0A1E) |
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
|
/* |
2036,6 → 2183,7 |
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) |
#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) |
|
#define RENDER_RING (1<<RCS) |
#define BSD_RING (1<<VCS) |
2048,14 → 2196,13 |
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) |
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ |
to_i915(dev)->ellc_size) |
__I915__(dev)->ellc_size) |
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
|
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev)) |
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false) |
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true) |
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) |
#define USES_PPGTT(dev) (i915.enable_ppgtt) |
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) |
|
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
2086,7 → 2233,7 |
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
|
#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) |
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) |
|
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
2093,6 → 2240,8 |
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ |
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) |
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) |
|
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
2100,8 → 2249,11 |
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 |
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 |
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 |
|
#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) |
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) |
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) |
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
2135,6 → 2287,7 |
int enable_rc6; |
int enable_fbc; |
int enable_ppgtt; |
int enable_execlists; |
int enable_psr; |
unsigned int preliminary_hw_support; |
int disable_power_well; |
2154,8 → 2307,6 |
extern struct i915_params i915 __read_mostly; |
|
/* i915_dma.c */ |
void i915_update_dri1_breadcrumb(struct drm_device *dev); |
extern void i915_kernel_lost_context(struct drm_device * dev); |
extern int i915_driver_load(struct drm_device *, unsigned long flags); |
extern int i915_driver_unload(struct drm_device *); |
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); |
2169,9 → 2320,6 |
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
unsigned long arg); |
#endif |
extern int i915_emit_box(struct drm_device *dev, |
struct drm_clip_rect *box, |
int DR1, int DR4); |
extern int intel_gpu_reset(struct drm_device *dev); |
extern int i915_reset(struct drm_device *dev); |
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
2181,8 → 2329,6 |
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); |
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); |
|
extern void intel_console_resume(struct work_struct *work); |
|
/* i915_irq.c */ |
void i915_queue_hangcheck(struct drm_device *dev); |
__printf(3, 4) |
2189,10 → 2335,10 |
void i915_handle_error(struct drm_device *dev, bool wedged, |
const char *fmt, ...); |
|
void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, |
int new_delay); |
extern void intel_irq_init(struct drm_device *dev); |
extern void intel_hpd_init(struct drm_device *dev); |
extern void intel_irq_init(struct drm_i915_private *dev_priv); |
extern void intel_hpd_init(struct drm_i915_private *dev_priv); |
int intel_irq_install(struct drm_i915_private *dev_priv); |
void intel_irq_uninstall(struct drm_i915_private *dev_priv); |
|
extern void intel_uncore_sanitize(struct drm_device *dev); |
extern void intel_uncore_early_sanitize(struct drm_device *dev, |
2212,10 → 2358,19 |
|
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); |
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); |
void |
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); |
void |
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask); |
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, |
uint32_t interrupt_mask, |
uint32_t enabled_irq_mask); |
#define ibx_enable_display_interrupt(dev_priv, bits) \ |
ibx_display_interrupt_update((dev_priv), (bits), (bits)) |
#define ibx_disable_display_interrupt(dev_priv, bits) \ |
ibx_display_interrupt_update((dev_priv), (bits), 0) |
|
/* i915_gem.c */ |
int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_create_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_pread_ioctl(struct drm_device *dev, void *data, |
2230,6 → 2385,20 |
struct drm_file *file_priv); |
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
struct intel_engine_cs *ring); |
void i915_gem_execbuffer_retire_commands(struct drm_device *dev, |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct drm_i915_gem_object *obj); |
int i915_gem_ringbuffer_submission(struct drm_device *dev, |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct intel_context *ctx, |
struct drm_i915_gem_execbuffer2 *args, |
struct list_head *vmas, |
struct drm_i915_gem_object *batch_obj, |
u64 exec_start, u32 flags); |
int i915_gem_execbuffer(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
2248,10 → 2417,6 |
struct drm_file *file_priv); |
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_set_tiling(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_get_tiling(struct drm_device *dev, void *data, |
2264,6 → 2429,12 |
int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_load(struct drm_device *dev); |
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, |
long target, |
unsigned flags); |
#define I915_SHRINK_PURGEABLE 0x1 |
#define I915_SHRINK_UNBOUND 0x2 |
#define I915_SHRINK_BOUND 0x4 |
void *i915_gem_object_alloc(struct drm_device *dev); |
void i915_gem_object_free(struct drm_i915_gem_object *obj); |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
2288,7 → 2459,6 |
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); |
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
void i915_gem_lastclose(struct drm_device *dev); |
|
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, |
int *needs_clflush); |
2382,6 → 2552,7 |
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_init(struct drm_device *dev); |
int i915_gem_init_rings(struct drm_device *dev); |
int __must_check i915_gem_init_hw(struct drm_device *dev); |
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); |
void i915_gem_init_swizzling(struct drm_device *dev); |
2394,6 → 2565,11 |
u32 *seqno); |
#define i915_add_request(ring, seqno) \ |
__i915_add_request(ring, NULL, NULL, seqno) |
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, |
unsigned reset_counter, |
bool interruptible, |
s64 *timeout, |
struct drm_i915_file_private *file_priv); |
int __must_check i915_wait_seqno(struct intel_engine_cs *ring, |
uint32_t seqno); |
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
2452,7 → 2628,7 |
} |
|
/* Some GGTT VM helpers */ |
#define obj_to_ggtt(obj) \ |
#define i915_obj_to_ggtt(obj) \ |
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) |
static inline bool i915_is_ggtt(struct i915_address_space *vm) |
{ |
2461,21 → 2637,30 |
return vm == ggtt; |
} |
|
/*
 * Convert an address space to its containing ppgtt. Only valid for a
 * per-process GTT; warns (but still converts) if passed the global GTT.
 */
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));

	return container_of(vm, struct i915_hw_ppgtt, base);
}
|
|
/* Returns true if @obj has a binding in the global GTT. */
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
}
|
/* Offset of @obj's binding within the global GTT. */
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
}
|
/* Size of @obj's binding within the global GTT. */
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}
|
static inline int __must_check |
2483,7 → 2668,8 |
uint32_t alignment, |
unsigned flags) |
{ |
return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL); |
return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), |
alignment, flags | PIN_GLOBAL); |
} |
|
static inline int |
2495,7 → 2681,6 |
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); |
|
/* i915_gem_context.c */ |
#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base) |
int __must_check i915_gem_context_init(struct drm_device *dev); |
void i915_gem_context_fini(struct drm_device *dev); |
void i915_gem_context_reset(struct drm_device *dev); |
2507,6 → 2692,8 |
struct intel_context * |
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); |
void i915_gem_context_free(struct kref *ctx_ref); |
struct drm_i915_gem_object * |
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); |
static inline void i915_gem_context_reference(struct intel_context *ctx) |
{ |
kref_get(&ctx->ref); |
2527,8 → 2714,6 |
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
|
/* i915_gem_render_state.c */ |
int i915_gem_render_state_init(struct intel_engine_cs *ring); |
/* i915_gem_evict.c */ |
int __must_check i915_gem_evict_something(struct drm_device *dev, |
struct i915_address_space *vm, |
2596,6 → 2781,7 |
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, |
const struct i915_error_state_file_priv *error); |
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, |
struct drm_i915_private *i915, |
size_t count, loff_t pos); |
static inline void i915_error_state_buf_release( |
struct drm_i915_error_state_buf *eb) |
2610,7 → 2796,7 |
void i915_destroy_error_state(struct drm_device *dev); |
|
void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
/* Old (int type) declaration removed: it conflicted with the one below. */
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
|
/* i915_cmd_parser.c */ |
int i915_cmd_parser_get_version(void); |
2653,7 → 2839,6 |
extern void intel_i2c_reset(struct drm_device *dev); |
|
/* intel_opregion.c */ |
struct intel_encoder; |
#ifdef CONFIG_ACPI |
extern int intel_opregion_setup(struct drm_device *dev); |
extern void intel_opregion_init(struct drm_device *dev); |
2691,7 → 2876,6 |
|
/* modesetting */ |
extern void intel_modeset_init_hw(struct drm_device *dev); |
extern void intel_modeset_suspend_hw(struct drm_device *dev); |
extern void intel_modeset_init(struct drm_device *dev); |
extern void intel_modeset_gem_init(struct drm_device *dev); |
extern void intel_modeset_cleanup(struct drm_device *dev); |
2702,6 → 2886,7 |
extern void i915_redisable_vga(struct drm_device *dev); |
extern void i915_redisable_vga_power_on(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
extern void intel_init_pch_refclk(struct drm_device *dev); |
2741,8 → 2926,8 |
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
void assert_force_wake_inactive(struct drm_i915_private *dev_priv); |
|
/* Old u8-mbox declarations removed: they conflicted with the u32 ones. */
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
|
/* intel_sideband.c */ |
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); |
2772,7 → 2957,9 |
|
/* Forcewake domain bits; FORCEWAKE_ALL covers every domain.
 * (Stale two-domain FORCEWAKE_ALL definition removed — it redefined the
 * macro with a different replacement list.) */
#define FORCEWAKE_RENDER (1 << 0)
#define FORCEWAKE_MEDIA (1 << 1)
#define FORCEWAKE_BLITTER (1 << 2)
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
					FORCEWAKE_BLITTER)
|
|
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) |
2838,6 → 3025,11 |
return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); |
} |
|
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) |
{ |
return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); |
} |
|
static inline unsigned long |
timespec_to_jiffies_timeout(const struct timespec *value) |
{ |
2905,4 → 3097,36 |
#define ioread32(addr) readl(addr) |
|
|
/*
 * No-op stand-in for the Linux runtime-PM API: always reports success.
 * NOTE(review): presumably this port has no runtime power management
 * (cf. the locally-defined inb/outb and ioread32 below) — confirm.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return 0;
}
|
/* No-op stand-in for the Linux runtime-PM API: always reports success. */
static inline int pm_runtime_set_active(struct device *dev)
{
	return 0;
}
|
/* No-op stand-in for the Linux runtime-PM API: nothing to disable here. */
static inline void pm_runtime_disable(struct device *dev)
{

}
|
/* No-op stand-in for the Linux runtime-PM API: always reports success. */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return 0;
}
|
/*
 * Read one byte from an x86 I/O port — local replacement for the
 * <asm/io.h> helper. The "=a" constraint pins the result to AL; "dN"
 * lets the port number be an 8-bit immediate or live in DX.
 */
static inline u8 inb(u16 port)
{
	u8 v;
	asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
	return v;
}
|
/*
 * Write one byte to an x86 I/O port — local replacement for the
 * <asm/io.h> helper. "a" places the value in AL; "dN" lets the port
 * number be an 8-bit immediate or live in DX.
 */
static inline void outb(u8 v, u16 port)
{
	asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
|
#endif |