31,6 → 31,7 |
#define _I915_DRV_H_ |
|
#include <uapi/drm/i915_drm.h> |
#include <uapi/drm/drm_fourcc.h> |
|
#include "i915_reg.h" |
#include "intel_bios.h" |
38,7 → 39,7 |
#include "intel_lrc.h" |
#include "i915_gem_gtt.h" |
#include "i915_gem_render_state.h" |
//#include <linux/io-mapping.h> |
#include <linux/scatterlist.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include <drm/intel-gtt.h> |
46,21 → 47,94 |
#include <drm/drm_gem.h> |
//#include <linux/backlight.h> |
#include <linux/hashtable.h> |
#include <linux/kref.h> |
#include "intel_guc.h" |
|
#include <linux/spinlock.h> |
#include <linux/err.h> |
|
extern int i915_fbsize; |
extern struct drm_i915_gem_object *main_fb_obj; |
extern struct drm_framebuffer *main_framebuffer; |
|
static struct drm_i915_gem_object *get_fb_obj() |
{ |
return main_fb_obj; |
}; |
|
#define ioread32(addr) readl(addr) |
/*
 * inb - read one byte from an x86 I/O port.
 * @port: I/O port number.
 *
 * Executes the "inb" instruction directly. Constraint "=a" binds the
 * result to AL; "dN" lets the port be an 8-bit immediate or the DX
 * register. "volatile" prevents the compiler from caching/reordering
 * the port access.
 */
static inline u8 inb(u16 port)
{
	u8 v;
	asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
	return v;
}
|
/*
 * outb - write one byte to an x86 I/O port.
 * @v:    byte to write (goes out via AL, constraint "a").
 * @port: I/O port number (immediate or DX, constraint "dN").
 */
static inline void outb(u8 v, u16 port)
{
	asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
|
|
/* General customization: |
*/ |
|
#define DRIVER_NAME "i915" |
#define DRIVER_DESC "Intel Graphics" |
#define DRIVER_DATE "20141121" |
#define DRIVER_DATE "20151010" |
|
#undef WARN_ON |
#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")") |
/* Many gcc seem to no see through this and fall over :( */ |
#if 0 |
#define WARN_ON(x) ({ \ |
bool __i915_warn_cond = (x); \ |
if (__builtin_constant_p(__i915_warn_cond)) \ |
BUILD_BUG_ON(__i915_warn_cond); \ |
WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) |
#else |
#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x ) |
#endif |
|
#undef WARN_ON_ONCE |
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x ) |
|
/*
 * MISSING_CASE - complain loudly about an unhandled switch case.
 *
 * Fixes vs. the original:
 *  - no trailing ';' inside the macro, so "MISSING_CASE(x);" no longer
 *    expands to a double statement (which breaks un-braced if/else);
 *  - the cast is (unsigned long) to match the %lu conversion specifier
 *    (passing a signed long for %lu is undefined behaviour, C99 7.19.6.1).
 */
#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (unsigned long)(x), __func__)
|
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and |
* WARN_ON()) for hw state sanity checks to check for unexpected conditions |
* which may not necessarily be a user visible problem. This will either |
* WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to |
* enable distros and users to tailor their preferred amount of i915 abrt |
* spam. |
*/ |
#define I915_STATE_WARN(condition, format...) ({ \ |
int __ret_warn_on = !!(condition); \ |
if (unlikely(__ret_warn_on)) { \ |
if (i915.verbose_state_checks) \ |
WARN(1, format); \ |
else \ |
DRM_ERROR(format); \ |
} \ |
unlikely(__ret_warn_on); \ |
}) |
|
/*
 * WARN_ON() flavour of I915_STATE_WARN() above: reports via WARN() or
 * DRM_ERROR() depending on i915.verbose_state_checks, and — being a
 * statement expression — evaluates to the unlikely-annotated truth value
 * of @condition so it can be used directly inside an if ().
 */
#define I915_STATE_WARN_ON(condition) ({				\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on)) {					\
		if (i915.verbose_state_checks)				\
			WARN(1, "WARN_ON(" #condition ")\n");		\
		else							\
			DRM_ERROR("WARN_ON(" #condition ")\n");		\
	}								\
	unlikely(__ret_warn_on);					\
})
|
/* Map a boolean to the human-readable string "yes" or "no". */
static inline const char *yesno(bool v)
{
	if (v)
		return "yes";

	return "no";
}
|
enum pipe { |
INVALID_PIPE = -1, |
PIPE_A = 0, |
81,17 → 155,17 |
#define transcoder_name(t) ((t) + 'A') |
|
/* |
* This is the maximum (across all platforms) number of planes (primary + |
* sprites) that can be active at the same time on one pipe. |
* |
* This value doesn't count the cursor plane. |
* I915_MAX_PLANES in the enum below is the maximum (across all platforms) |
* number of planes per CRTC. Not all platforms really have this many planes, |
* which means some arrays of size I915_MAX_PLANES may have unused entries |
* between the topmost sprite plane and the cursor plane. |
*/ |
#define I915_MAX_PLANES 3 |
|
/*
 * Plane identifiers within one pipe/CRTC.
 *
 * NOTE(review): I915_MAX_PLANES is both #defined as 3 a few lines above
 * and declared as an enumerator here; the macro would expand inside this
 * enum and break compilation. One of the two definitions is stale diff
 * residue — confirm which one the rest of the driver expects and drop
 * the other.
 */
enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};
#define plane_name(p) ((p) + 'A') |
|
138,6 → 212,7 |
POWER_DOMAIN_PORT_DDI_C_4_LANES, |
POWER_DOMAIN_PORT_DDI_D_2_LANES, |
POWER_DOMAIN_PORT_DDI_D_4_LANES, |
POWER_DOMAIN_PORT_DDI_E_2_LANES, |
POWER_DOMAIN_PORT_DSI, |
POWER_DOMAIN_PORT_CRT, |
POWER_DOMAIN_PORT_OTHER, |
144,6 → 219,11 |
POWER_DOMAIN_VGA, |
POWER_DOMAIN_AUDIO, |
POWER_DOMAIN_PLLS, |
POWER_DOMAIN_AUX_A, |
POWER_DOMAIN_AUX_B, |
POWER_DOMAIN_AUX_C, |
POWER_DOMAIN_AUX_D, |
POWER_DOMAIN_GMBUS, |
POWER_DOMAIN_INIT, |
|
POWER_DOMAIN_NUM, |
158,17 → 238,51 |
|
enum hpd_pin { |
HPD_NONE = 0, |
HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ |
HPD_TV = HPD_NONE, /* TV is known to be unreliable */ |
HPD_CRT, |
HPD_SDVO_B, |
HPD_SDVO_C, |
HPD_PORT_A, |
HPD_PORT_B, |
HPD_PORT_C, |
HPD_PORT_D, |
HPD_PORT_E, |
HPD_NUM_PINS |
}; |
|
/* Iterate __pin over every real HPD pin, skipping HPD_NONE (== 0). */
#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
|
/*
 * Hotplug detect (HPD) state, embedded in struct drm_i915_private:
 * per-pin IRQ statistics plus the work items that service hotplug and
 * digital-port events.
 */
struct i915_hotplug {
	struct work_struct hotplug_work;

	/* Per-pin IRQ accounting, indexed by enum hpd_pin. */
	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;		/* pins with a pending event — assumed; verify against the irq handler */
	struct delayed_work reenable_work;	/* presumably re-enables HPD_DISABLED pins later; confirm */

	struct intel_digital_port *irq_port[I915_MAX_PORTS];
	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	/*
	 * if we get a HPD irq from DP and a HPD irq from non-DP
	 * the non-DP HPD could block the workqueue on a mode config
	 * mutex getting, that userspace may have taken. However
	 * userspace is waiting on the DP workqueue to run which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};
|
#define I915_GEM_GPU_DOMAINS \ |
(I915_GEM_DOMAIN_RENDER | \ |
I915_GEM_DOMAIN_SAMPLER | \ |
178,13 → 292,29 |
|
#define for_each_pipe(__dev_priv, __p) \ |
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) |
#define for_each_plane(pipe, p) \ |
for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) |
#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) |
#define for_each_plane(__dev_priv, __pipe, __p) \ |
for ((__p) = 0; \ |
(__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ |
(__p)++) |
#define for_each_sprite(__dev_priv, __p, __s) \ |
for ((__s) = 0; \ |
(__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ |
(__s)++) |
|
#define for_each_crtc(dev, crtc) \ |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
|
#define for_each_intel_plane(dev, intel_plane) \ |
list_for_each_entry(intel_plane, \ |
&dev->mode_config.plane_list, \ |
base.head) |
|
#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ |
list_for_each_entry(intel_plane, \ |
&(dev)->mode_config.plane_list, \ |
base.head) \ |
if ((intel_plane)->pipe == (intel_crtc)->pipe) |
|
#define for_each_intel_crtc(dev, intel_crtc) \ |
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) |
|
193,6 → 323,11 |
&(dev)->mode_config.encoder_list, \ |
base.head) |
|
#define for_each_intel_connector(dev, intel_connector) \ |
list_for_each_entry(intel_connector, \ |
&dev->mode_config.connector_list, \ |
base.head) |
|
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ |
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ |
if ((intel_encoder)->base.crtc == (__crtc)) |
209,6 → 344,30 |
struct i915_mm_struct; |
struct i915_mmu_object; |
|
/*
 * Driver state attached to each open DRM file descriptor.
 */
struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;	/* protects request_list — assumed from proximity; verify */
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;
	struct idr context_idr;		/* user context handle lookup — presumably handle -> intel_context; confirm */

	/* Per-file GPU waitboost accounting (see the 20ms comment above). */
	struct intel_rps_client {
		struct list_head link;
		unsigned boosts;
	} rps;

	struct intel_engine_cs *bsd_ring;
};
|
enum intel_dpll_id { |
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ |
/* real shared dpll ids must be >= 0 */ |
217,6 → 376,8 |
/* hsw/bdw */ |
DPLL_ID_WRPLL1 = 0, |
DPLL_ID_WRPLL2 = 1, |
DPLL_ID_SPLL = 2, |
|
/* skl */ |
DPLL_ID_SKL_DPLL1 = 0, |
DPLL_ID_SKL_DPLL2 = 1, |
233,11 → 394,12 |
|
/* hsw, bdw */ |
uint32_t wrpll; |
uint32_t spll; |
|
/* skl */ |
/* |
* DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in |
* lower part of crtl1 and they get shifted into position when writing |
* lower part of ctrl1 and they get shifted into position when writing |
* the register. This allows us to easily compare the state to share |
* the DPLL. |
*/ |
244,6 → 406,10 |
uint32_t ctrl1; |
/* HDMI only, 0 when used for DP */ |
uint32_t cfgcr1, cfgcr2; |
|
/* bxt */ |
uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, |
pcsdw12; |
}; |
|
struct intel_shared_dpll_config { |
253,7 → 419,6 |
|
struct intel_shared_dpll { |
struct intel_shared_dpll_config config; |
struct intel_shared_dpll_config *new_config; |
|
int active; /* count of number of active CRTCs (i.e. DPMS on) */ |
bool on; /* is the PLL actually active? Disabled during modeset */ |
313,14 → 478,14 |
struct opregion_asle; |
|
struct intel_opregion { |
struct opregion_header __iomem *header; |
struct opregion_acpi __iomem *acpi; |
struct opregion_swsci __iomem *swsci; |
struct opregion_header *header; |
struct opregion_acpi *acpi; |
struct opregion_swsci *swsci; |
u32 swsci_gbda_sub_functions; |
u32 swsci_sbcb_sub_functions; |
struct opregion_asle __iomem *asle; |
void __iomem *vbt; |
u32 __iomem *lid_state; |
struct opregion_asle *asle; |
void *vbt; |
u32 *lid_state; |
struct work_struct asle_work; |
}; |
#define OPREGION_SIZE (8*1024) |
355,6 → 520,7 |
struct timeval time; |
|
char error_msg[128]; |
int iommu; |
u32 reset_count; |
u32 suspend_count; |
|
368,6 → 534,8 |
u32 forcewake; |
u32 error; /* gen6+ */ |
u32 err_int; /* gen7 */ |
u32 fault_data0; /* gen8, gen9 */ |
u32 fault_data1; /* gen8, gen9 */ |
u32 done_reg; |
u32 gac_eco; |
u32 gam_ecochk; |
377,6 → 545,7 |
u64 fence[I915_MAX_NUM_FENCES]; |
struct intel_overlay_error_state *overlay; |
struct intel_display_error_state *display; |
struct drm_i915_error_object *semaphore_obj; |
|
struct drm_i915_error_ring { |
bool valid; |
393,6 → 562,7 |
u32 semaphore_seqno[I915_NUM_RINGS - 1]; |
|
/* Register state */ |
u32 start; |
u32 tail; |
u32 head; |
u32 ctl; |
413,7 → 583,7 |
|
struct drm_i915_error_object { |
int page_count; |
u32 gtt_offset; |
u64 gtt_offset; |
u32 *pages[0]; |
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; |
|
438,8 → 608,8 |
struct drm_i915_error_buffer { |
u32 size; |
u32 name; |
u32 rseqno, wseqno; |
u32 gtt_offset; |
u32 rseqno[I915_NUM_RINGS], wseqno; |
u64 gtt_offset; |
u32 read_domains; |
u32 write_domain; |
s32 fence_reg:I915_MAX_NUM_FENCE_BITS; |
458,16 → 628,13 |
|
struct intel_connector; |
struct intel_encoder; |
struct intel_crtc_config; |
struct intel_plane_config; |
struct intel_crtc_state; |
struct intel_initial_plane_config; |
struct intel_crtc; |
struct intel_limit; |
struct dpll; |
|
struct drm_i915_display_funcs { |
bool (*fbc_enabled)(struct drm_device *dev); |
void (*enable_fbc)(struct drm_crtc *crtc); |
void (*disable_fbc)(struct drm_device *dev); |
int (*get_display_clock_speed)(struct drm_device *dev); |
int (*get_fifo_size)(struct drm_device *dev, int plane); |
/** |
484,7 → 651,7 |
* Returns true on success, false on failure. |
*/ |
bool (*find_dpll)(const struct intel_limit *limit, |
struct intel_crtc *crtc, |
struct intel_crtc_state *crtc_state, |
int target, int refclk, |
struct dpll *match_clock, |
struct dpll *best_clock); |
493,20 → 660,21 |
struct drm_crtc *crtc, |
uint32_t sprite_width, uint32_t sprite_height, |
int pixel_size, bool enable, bool scaled); |
void (*modeset_global_resources)(struct drm_device *dev); |
int (*modeset_calc_cdclk)(struct drm_atomic_state *state); |
void (*modeset_commit_cdclk)(struct drm_atomic_state *state); |
/* Returns the active state of the crtc, and if the crtc is active, |
* fills out the pipe-config with the hw state. */ |
bool (*get_pipe_config)(struct intel_crtc *, |
struct intel_crtc_config *); |
void (*get_plane_config)(struct intel_crtc *, |
struct intel_plane_config *); |
int (*crtc_compute_clock)(struct intel_crtc *crtc); |
struct intel_crtc_state *); |
void (*get_initial_plane_config)(struct intel_crtc *, |
struct intel_initial_plane_config *); |
int (*crtc_compute_clock)(struct intel_crtc *crtc, |
struct intel_crtc_state *crtc_state); |
void (*crtc_enable)(struct drm_crtc *crtc); |
void (*crtc_disable)(struct drm_crtc *crtc); |
void (*off)(struct drm_crtc *crtc); |
void (*audio_codec_enable)(struct drm_connector *connector, |
struct intel_encoder *encoder, |
struct drm_display_mode *mode); |
const struct drm_display_mode *adjusted_mode); |
void (*audio_codec_disable)(struct intel_encoder *encoder); |
void (*fdi_link_train)(struct drm_crtc *crtc); |
void (*init_clock_gating)(struct drm_device *dev); |
513,7 → 681,7 |
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_i915_gem_object *obj, |
struct intel_engine_cs *ring, |
struct drm_i915_gem_request *req, |
uint32_t flags); |
void (*update_primary_plane)(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
524,20 → 692,30 |
/* render clock increase/decrease */ |
/* display clock increase/decrease */ |
/* pll clock increase/decrease */ |
}; |
|
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe); |
uint32_t (*get_backlight)(struct intel_connector *connector); |
void (*set_backlight)(struct intel_connector *connector, |
uint32_t level); |
void (*disable_backlight)(struct intel_connector *connector); |
void (*enable_backlight)(struct intel_connector *connector); |
/*
 * Index of each hardware forcewake domain; also indexes the
 * uncore.fw_domain[] array below.
 */
enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};
|
/*
 * Bitmask view of the forcewake domains: bit n corresponds to
 * forcewake_domain_id n; FORCEWAKE_ALL is the union of all three.
 */
enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};
|
struct intel_uncore_funcs { |
void (*force_wake_get)(struct drm_i915_private *dev_priv, |
int fw_engine); |
enum forcewake_domains domains); |
void (*force_wake_put)(struct drm_i915_private *dev_priv, |
int fw_engine); |
enum forcewake_domains domains); |
|
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); |
560,15 → 738,48 |
struct intel_uncore_funcs funcs; |
|
unsigned fifo_count; |
unsigned forcewake_count; |
enum forcewake_domains fw_domains; |
|
unsigned fw_rendercount; |
unsigned fw_mediacount; |
unsigned fw_blittercount; |
struct intel_uncore_forcewake_domain { |
struct drm_i915_private *i915; |
enum forcewake_domain_id id; |
unsigned wake_count; |
struct timer_list timer; |
u32 reg_set; |
u32 val_set; |
u32 val_clear; |
u32 reg_ack; |
u32 reg_post; |
u32 val_reset; |
} fw_domain[FW_DOMAIN_ID_COUNT]; |
}; |
|
struct timer_list force_wake_timer; |
/* Iterate over initialised fw domains */ |
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \ |
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ |
(i__) < FW_DOMAIN_ID_COUNT; \ |
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ |
if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) |
|
#define for_each_fw_domain(domain__, dev_priv__, i__) \ |
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) |
|
/* Load status of the CSR/DMC firmware (see struct intel_csr below). */
enum csr_state {
	FW_UNINITIALIZED = 0,
	FW_LOADED,
	FW_FAILED
};
|
/*
 * CSR/DMC firmware loader state.
 */
struct intel_csr {
	const char *fw_path;		/* firmware blob name/path to request */
	uint32_t *dmc_payload;		/* loaded firmware payload */
	uint32_t dmc_fw_size;		/* payload size — bytes vs dwords not visible here; confirm */
	uint32_t mmio_count;		/* valid entries in mmioaddr[]/mmiodata[] */
	uint32_t mmioaddr[8];		/* register offset/value pairs — presumably programmed at fw load; verify */
	uint32_t mmiodata[8];
	enum csr_state state;
};
|
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ |
func(is_mobile) sep \ |
func(is_i85x) sep \ |
612,6 → 823,18 |
int trans_offsets[I915_MAX_TRANSCODERS]; |
int palette_offsets[I915_MAX_PIPES]; |
int cursor_offsets[I915_MAX_PIPES]; |
|
/* Slice/subslice/EU info */ |
u8 slice_total; |
u8 subslice_total; |
u8 subslice_per_slice; |
u8 eu_total; |
u8 eu_per_subslice; |
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ |
u8 subslice_7eu[3]; |
u8 has_slice_pg:1; |
u8 has_subslice_pg:1; |
u8 has_eu_pg:1; |
}; |
|
#undef DEFINE_FLAG |
637,6 → 860,11 |
/* Time when this context was last blamed for a GPU reset */ |
unsigned long guilty_ts; |
|
/* If the contexts causes a second GPU hang within this time, |
* it is permanently banned from submitting any more work. |
*/ |
unsigned long ban_period_seconds; |
|
/* This context is banned to submit more work */ |
bool banned; |
}; |
643,16 → 871,20 |
|
/* This must match up with the value previously used for execbuf2.rsvd1. */ |
#define DEFAULT_CONTEXT_HANDLE 0 |
|
#define CONTEXT_NO_ZEROMAP (1<<0) |
/** |
* struct intel_context - as the name implies, represents a context. |
* @ref: reference count. |
* @user_handle: userspace tracking identity for this context. |
* @remap_slice: l3 row remapping information. |
* @flags: context specific flags: |
* CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0. |
* @file_priv: filp associated with this context (NULL for global default |
* context). |
* @hang_stats: information about the role of this context in possible GPU |
* hangs. |
* @vm: virtual memory space used by this context. |
* @ppgtt: virtual memory space used by this context. |
* @legacy_hw_ctx: render context backing object and whether it is correctly |
* initialized (legacy ring submission mechanism only). |
* @link: link in the global list of contexts. |
664,6 → 896,8 |
struct kref ref; |
int user_handle; |
uint8_t remap_slice; |
struct drm_i915_private *i915; |
int flags; |
struct drm_i915_file_private *file_priv; |
struct i915_ctx_hang_stats hang_stats; |
struct i915_hw_ppgtt *ppgtt; |
675,21 → 909,33 |
} legacy_hw_ctx; |
|
/* Execlists */ |
bool rcs_initialized; |
struct { |
struct drm_i915_gem_object *state; |
struct intel_ringbuffer *ringbuf; |
int unpin_count; |
int pin_count; |
} engine[I915_NUM_RINGS]; |
|
struct list_head link; |
}; |
|
/*
 * How a framebuffer ended up being written: through a GTT mapping, by
 * the CPU, by the command streamer (CS), via a page flip, or via the
 * dirtyfb ioctl.
 */
enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};
|
struct i915_fbc { |
unsigned long size; |
/* This is always the inner lock when overlapping with struct_mutex and |
* it's the outer lock when overlapping with stolen_lock. */ |
struct mutex lock; |
unsigned long uncompressed_size; |
unsigned threshold; |
unsigned int fb_id; |
enum plane plane; |
unsigned int possible_framebuffer_bits; |
unsigned int busy_bits; |
struct intel_crtc *crtc; |
int y; |
|
struct drm_mm_node compressed_fb; |
701,17 → 947,9 |
* possible. */ |
bool enabled; |
|
/* On gen8 some rings cannont perform fbc clean operation so for now |
* we are doing this on SW with mmio. |
* This variable works in the opposite information direction |
* of ring->fbc_dirty telling software on frontbuffer tracking |
* to perform the cache clean on sw side. |
*/ |
bool need_sw_cache_clean; |
|
struct intel_fbc_work { |
struct delayed_work work; |
struct drm_crtc *crtc; |
struct intel_crtc *crtc; |
struct drm_framebuffer *fb; |
} *fbc_work; |
|
727,14 → 965,45 |
FBC_MULTIPLE_PIPES, /* more than one pipe active */ |
FBC_MODULE_PARAM, |
FBC_CHIP_DEFAULT, /* disabled by default on this chip */ |
FBC_ROTATION, /* rotation is not supported */ |
FBC_IN_DBG_MASTER, /* kernel debugger is active */ |
FBC_BAD_STRIDE, /* stride is not supported */ |
FBC_PIXEL_RATE, /* pixel rate is too big */ |
FBC_PIXEL_FORMAT /* pixel format is invalid */ |
} no_fbc_reason; |
|
bool (*fbc_enabled)(struct drm_i915_private *dev_priv); |
void (*enable_fbc)(struct intel_crtc *crtc); |
void (*disable_fbc)(struct drm_i915_private *dev_priv); |
}; |
|
/** |
* HIGH_RR is the highest eDP panel refresh rate read from EDID |
* LOW_RR is the lowest eDP panel refresh rate found from EDID |
* parsing for same resolution. |
*/ |
enum drrs_refresh_rate_type { |
DRRS_HIGH_RR, |
DRRS_LOW_RR, |
DRRS_MAX_RR, /* RR count */ |
}; |
|
enum drrs_support_type { |
DRRS_NOT_SUPPORTED = 0, |
STATIC_DRRS_SUPPORT = 1, |
SEAMLESS_DRRS_SUPPORT = 2 |
}; |
|
struct intel_dp; |
/*
 * DRRS (Display Refresh Rate Switching) runtime state; see the
 * drrs_refresh_rate_type / drrs_support_type enums above.
 */
struct i915_drrs {
	struct intel_connector *connector;	/* NOTE(review): may be stale diff residue alongside the dp pointer below; confirm which is actually used */
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;	/* current RR selection — assumed; verify */
	enum drrs_support_type type;
};
|
struct intel_dp; |
struct i915_psr { |
struct mutex lock; |
bool sink_support; |
743,6 → 1012,8 |
bool active; |
struct delayed_work work; |
unsigned busy_frontbuffer_bits; |
bool psr2_support; |
bool aux_frame_sync; |
}; |
|
enum intel_pch { |
779,150 → 1050,21 |
}; |
|
struct i915_suspend_saved_registers { |
u8 saveLBB; |
u32 saveDSPACNTR; |
u32 saveDSPBCNTR; |
u32 saveDSPARB; |
u32 savePIPEACONF; |
u32 savePIPEBCONF; |
u32 savePIPEASRC; |
u32 savePIPEBSRC; |
u32 saveFPA0; |
u32 saveFPA1; |
u32 saveDPLL_A; |
u32 saveDPLL_A_MD; |
u32 saveHTOTAL_A; |
u32 saveHBLANK_A; |
u32 saveHSYNC_A; |
u32 saveVTOTAL_A; |
u32 saveVBLANK_A; |
u32 saveVSYNC_A; |
u32 saveBCLRPAT_A; |
u32 saveTRANSACONF; |
u32 saveTRANS_HTOTAL_A; |
u32 saveTRANS_HBLANK_A; |
u32 saveTRANS_HSYNC_A; |
u32 saveTRANS_VTOTAL_A; |
u32 saveTRANS_VBLANK_A; |
u32 saveTRANS_VSYNC_A; |
u32 savePIPEASTAT; |
u32 saveDSPASTRIDE; |
u32 saveDSPASIZE; |
u32 saveDSPAPOS; |
u32 saveDSPAADDR; |
u32 saveDSPASURF; |
u32 saveDSPATILEOFF; |
u32 savePFIT_PGM_RATIOS; |
u32 saveBLC_HIST_CTL; |
u32 saveBLC_PWM_CTL; |
u32 saveBLC_PWM_CTL2; |
u32 saveBLC_CPU_PWM_CTL; |
u32 saveBLC_CPU_PWM_CTL2; |
u32 saveFPB0; |
u32 saveFPB1; |
u32 saveDPLL_B; |
u32 saveDPLL_B_MD; |
u32 saveHTOTAL_B; |
u32 saveHBLANK_B; |
u32 saveHSYNC_B; |
u32 saveVTOTAL_B; |
u32 saveVBLANK_B; |
u32 saveVSYNC_B; |
u32 saveBCLRPAT_B; |
u32 saveTRANSBCONF; |
u32 saveTRANS_HTOTAL_B; |
u32 saveTRANS_HBLANK_B; |
u32 saveTRANS_HSYNC_B; |
u32 saveTRANS_VTOTAL_B; |
u32 saveTRANS_VBLANK_B; |
u32 saveTRANS_VSYNC_B; |
u32 savePIPEBSTAT; |
u32 saveDSPBSTRIDE; |
u32 saveDSPBSIZE; |
u32 saveDSPBPOS; |
u32 saveDSPBADDR; |
u32 saveDSPBSURF; |
u32 saveDSPBTILEOFF; |
u32 saveVGA0; |
u32 saveVGA1; |
u32 saveVGA_PD; |
u32 saveVGACNTRL; |
u32 saveADPA; |
u32 saveLVDS; |
u32 savePP_ON_DELAYS; |
u32 savePP_OFF_DELAYS; |
u32 saveDVOA; |
u32 saveDVOB; |
u32 saveDVOC; |
u32 savePP_ON; |
u32 savePP_OFF; |
u32 savePP_CONTROL; |
u32 savePP_DIVISOR; |
u32 savePFIT_CONTROL; |
u32 save_palette_a[256]; |
u32 save_palette_b[256]; |
u32 saveFBC_CONTROL; |
u32 saveIER; |
u32 saveIIR; |
u32 saveIMR; |
u32 saveDEIER; |
u32 saveDEIMR; |
u32 saveGTIER; |
u32 saveGTIMR; |
u32 saveFDI_RXA_IMR; |
u32 saveFDI_RXB_IMR; |
u32 saveCACHE_MODE_0; |
u32 saveMI_ARB_STATE; |
u32 saveSWF0[16]; |
u32 saveSWF1[16]; |
u32 saveSWF2[3]; |
u8 saveMSR; |
u8 saveSR[8]; |
u8 saveGR[25]; |
u8 saveAR_INDEX; |
u8 saveAR[21]; |
u8 saveDACMASK; |
u8 saveCR[37]; |
u32 saveSWF3[3]; |
uint64_t saveFENCE[I915_MAX_NUM_FENCES]; |
u32 saveCURACNTR; |
u32 saveCURAPOS; |
u32 saveCURABASE; |
u32 saveCURBCNTR; |
u32 saveCURBPOS; |
u32 saveCURBBASE; |
u32 saveCURSIZE; |
u32 saveDP_B; |
u32 saveDP_C; |
u32 saveDP_D; |
u32 savePIPEA_GMCH_DATA_M; |
u32 savePIPEB_GMCH_DATA_M; |
u32 savePIPEA_GMCH_DATA_N; |
u32 savePIPEB_GMCH_DATA_N; |
u32 savePIPEA_DP_LINK_M; |
u32 savePIPEB_DP_LINK_M; |
u32 savePIPEA_DP_LINK_N; |
u32 savePIPEB_DP_LINK_N; |
u32 saveFDI_RXA_CTL; |
u32 saveFDI_TXA_CTL; |
u32 saveFDI_RXB_CTL; |
u32 saveFDI_TXB_CTL; |
u32 savePFA_CTL_1; |
u32 savePFB_CTL_1; |
u32 savePFA_WIN_SZ; |
u32 savePFB_WIN_SZ; |
u32 savePFA_WIN_POS; |
u32 savePFB_WIN_POS; |
u32 savePCH_DREF_CONTROL; |
u32 saveDISP_ARB_CTL; |
u32 savePIPEA_DATA_M1; |
u32 savePIPEA_DATA_N1; |
u32 savePIPEA_LINK_M1; |
u32 savePIPEA_LINK_N1; |
u32 savePIPEB_DATA_M1; |
u32 savePIPEB_DATA_N1; |
u32 savePIPEB_LINK_M1; |
u32 savePIPEB_LINK_N1; |
u32 saveMCHBAR_RENDER_STANDBY; |
u32 savePCH_PORT_HOTPLUG; |
u16 saveGCDGMBUS; |
}; |
985,6 → 1127,7 |
/* Display 2 CZ domain */ |
u32 gu_ctl0; |
u32 gu_ctl1; |
u32 pcbr; |
u32 clock_gate_dis2; |
}; |
|
1018,25 → 1161,35 |
u8 max_freq_softlimit; /* Max frequency permitted by the driver */ |
u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ |
u8 min_freq; /* AKA RPn. Minimum frequency */ |
u8 idle_freq; /* Frequency to request when we are idle */ |
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ |
u8 rp1_freq; /* "less than" RP0 power/freqency */ |
u8 rp0_freq; /* Non-overclocked max frequency. */ |
u32 cz_freq; |
|
u32 ei_interrupt_count; |
u8 up_threshold; /* Current %busy required to uplock */ |
u8 down_threshold; /* Current %busy required to downclock */ |
|
int last_adj; |
enum { LOW_POWER, BETWEEN, HIGH_POWER } power; |
|
spinlock_t client_lock; |
struct list_head clients; |
bool client_boost; |
|
bool enabled; |
struct delayed_work delayed_resume_work; |
unsigned boosts; |
|
struct intel_rps_client semaphores, mmioflips; |
|
/* manual wa residency calculations */ |
struct intel_rps_ei up_ei, down_ei; |
|
/* |
* Protects RPS/RC6 register access and PCU communication. |
* Must be taken after struct_mutex if nested. |
* Must be taken after struct_mutex if nested. Note that |
* this lock may be held for long periods of time when |
* talking to hw - so only take it when talking to hw! |
*/ |
struct mutex hw_lock; |
}; |
1061,9 → 1214,6 |
|
int c_m; |
int r_t; |
|
struct drm_i915_gem_object *pwrctx; |
struct drm_i915_gem_object *renderctx; |
}; |
|
struct drm_i915_private; |
1133,6 → 1283,10 |
struct i915_gem_mm { |
/** Memory allocator for GTT stolen memory */ |
struct drm_mm stolen; |
/** Protects the usage of the GTT stolen memory allocator. This is |
* always the inner lock when overlapping with struct_mutex. */ |
struct mutex stolen_lock; |
|
/** List of all objects in gtt_space. Used to restore gtt |
* mappings on resume */ |
struct list_head bound_list; |
1220,15 → 1374,14 |
/* Hang gpu twice in this window and your context gets banned */ |
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) |
|
struct timer_list hangcheck_timer; |
struct workqueue_struct *hangcheck_wq; |
struct delayed_work hangcheck_work; |
|
/* For reset and error_state handling. */ |
spinlock_t lock; |
/* Protected by the above dev->gpu_error.lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct work; |
|
|
unsigned long missed_irq_rings; |
|
/** |
1283,6 → 1436,15 |
MODESET_SUSPENDED, |
}; |
|
#define DP_AUX_A 0x40 |
#define DP_AUX_B 0x10 |
#define DP_AUX_C 0x20 |
#define DP_AUX_D 0x30 |
|
#define DDC_PIN_B 0x05 |
#define DDC_PIN_C 0x04 |
#define DDC_PIN_D 0x06 |
|
struct ddi_vbt_port_info { |
/* |
* This is an index in the HDMI/DVI DDI buffer translation table. |
1295,12 → 1457,19 |
uint8_t supports_dvi:1; |
uint8_t supports_hdmi:1; |
uint8_t supports_dp:1; |
|
uint8_t alternate_aux_channel; |
uint8_t alternate_ddc_pin; |
|
uint8_t dp_boost_level; |
uint8_t hdmi_boost_level; |
}; |
|
enum drrs_support_type { |
DRRS_NOT_SUPPORTED = 0, |
STATIC_DRRS_SUPPORT = 1, |
SEAMLESS_DRRS_SUPPORT = 2 |
enum psr_lines_to_wait { |
PSR_0_LINES_TO_WAIT = 0, |
PSR_1_LINE_TO_WAIT, |
PSR_4_LINES_TO_WAIT, |
PSR_8_LINES_TO_WAIT |
}; |
|
struct intel_vbt_data { |
1332,6 → 1501,15 |
struct edp_power_seq edp_pps; |
|
struct { |
bool full_link; |
bool require_aux_wakeup; |
int idle_frames; |
enum psr_lines_to_wait lines_to_wait; |
int tp1_wakeup_time; |
int tp2_tp3_wakeup_time; |
} psr; |
|
struct { |
u16 pwm_freq_hz; |
bool present; |
bool active_low_pwm; |
1380,6 → 1558,29 |
enum intel_ddb_partitioning partitioning; |
}; |
|
/* Per-pipe VLV watermarks: primary plane, two sprites, and the cursor. */
struct vlv_pipe_wm {
	uint16_t primary;
	uint16_t sprite[2];
	uint8_t cursor;
};
|
/* VLV self-refresh (sr) watermarks: single plane plus cursor. */
struct vlv_sr_wm {
	uint16_t plane;
	uint8_t cursor;
};
|
/*
 * Complete VLV watermark state: per-pipe and self-refresh watermarks,
 * the per-pipe ddl register values, plus the overall watermark level
 * and whether cxsr is enabled.
 */
struct vlv_wm_values {
	struct vlv_pipe_wm pipe[3];
	struct vlv_sr_wm sr;
	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
	uint8_t level;
	bool cxsr;
};
|
struct skl_ddb_entry { |
uint16_t start, end; /* in number of blocks, 'end' is exclusive */ |
}; |
1400,8 → 1601,8 |
|
struct skl_ddb_allocation { |
struct skl_ddb_entry pipe[I915_MAX_PIPES]; |
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; |
struct skl_ddb_entry cursor[I915_MAX_PIPES]; |
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ |
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; |
}; |
|
struct skl_wm_values { |
1409,18 → 1610,13 |
struct skl_ddb_allocation ddb; |
uint32_t wm_linetime[I915_MAX_PIPES]; |
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; |
uint32_t cursor[I915_MAX_PIPES][8]; |
uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; |
uint32_t cursor_trans[I915_MAX_PIPES]; |
}; |
|
struct skl_wm_level { |
bool plane_en[I915_MAX_PLANES]; |
bool cursor_en; |
uint16_t plane_res_b[I915_MAX_PLANES]; |
uint8_t plane_res_l[I915_MAX_PLANES]; |
uint16_t cursor_res_b; |
uint8_t cursor_res_l; |
}; |
|
/* |
1506,8 → 1702,27 |
u32 count; |
}; |
|
/* vGPU state: active when i915 runs as a guest on a virtualised GPU — inferred from the name; confirm. */
struct i915_virtual_gpu {
	bool active;
};
|
/*
 * Bundled parameters describing one execbuffer submission; handed to
 * the gt.execbuf_submit() hook of struct drm_i915_private.
 */
struct i915_execbuffer_params {
	struct drm_device *dev;
	struct drm_file *file;
	uint32_t dispatch_flags;
	uint32_t args_batch_start_offset;	/* start offset inside batch_obj — assumed; verify */
	uint64_t batch_obj_vm_offset;		/* VM offset of batch_obj — assumed; verify */
	struct intel_engine_cs *ring;		/* engine to submit on */
	struct drm_i915_gem_object *batch_obj;
	struct intel_context *ctx;
	struct drm_i915_gem_request *request;
};
|
struct drm_i915_private { |
struct drm_device *dev; |
struct kmem_cache *objects; |
struct kmem_cache *vmas; |
struct kmem_cache *requests; |
|
const struct intel_device_info info; |
|
1517,9 → 1732,17 |
|
struct intel_uncore uncore; |
|
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
struct i915_virtual_gpu vgpu; |
|
struct intel_guc guc; |
|
struct intel_csr csr; |
|
/* Display CSR-related protection */ |
struct mutex csr_lock; |
|
struct intel_gmbus gmbus[GMBUS_NUM_PINS]; |
|
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
1550,11 → 1773,9 |
|
bool display_irqs_enabled; |
|
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
// struct pm_qos_request pm_qos; |
|
/* DPIO indirect register protection */ |
struct mutex dpio_lock; |
/* Sideband mailbox protection */ |
struct mutex sb_lock; |
|
/** Cached value of IMR to avoid reads in updating the bitfield */ |
union { |
1566,19 → 1787,7 |
u32 pm_rps_events; |
u32 pipestat_irq_mask[I915_MAX_PIPES]; |
|
struct work_struct hotplug_work; |
struct { |
unsigned long hpd_last_jiffies; |
int hpd_cnt; |
enum { |
HPD_ENABLED = 0, |
HPD_DISABLED = 1, |
HPD_MARK_DISABLED = 2 |
} hpd_mark; |
} hpd_stats[HPD_NUM_PINS]; |
u32 hpd_event_bits; |
struct delayed_work hotplug_reenable_work; |
|
struct i915_hotplug hotplug; |
struct i915_fbc fbc; |
struct i915_drrs drrs; |
struct intel_opregion opregion; |
1599,12 → 1808,14 |
struct mutex pps_mutex; |
|
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ |
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
|
unsigned int fsb_freq, mem_freq, is_ddr3; |
unsigned int vlv_cdclk_freq; |
unsigned int skl_boot_cdclk; |
unsigned int cdclk_freq, max_cdclk_freq; |
unsigned int max_dotclk_freq; |
unsigned int hpll_freq; |
unsigned int czclk_freq; |
|
/** |
* wq - Driver workqueue for GEM. |
1654,9 → 1865,6 |
|
/* Reclocking support */ |
bool render_reclock_avail; |
bool lvds_downclock_avail; |
/* indicates the reduced downclock for LVDS*/ |
int lvds_downclock; |
|
struct i915_frontbuffer_tracking fb_tracking; |
|
1684,7 → 1892,7 |
|
struct drm_i915_gem_object *vlv_pctx; |
|
#ifdef CONFIG_DRM_I915_FBDEV |
#ifdef CONFIG_DRM_FBDEV_EMULATION |
/* list of fbdev register on this device */ |
struct intel_fbdev *fbdev; |
struct work_struct fbdev_suspend_work; |
1693,11 → 1901,22 |
struct drm_property *broadcast_rgb_property; |
struct drm_property *force_audio_property; |
|
/* hda/i915 audio component */ |
struct i915_audio_component *audio_component; |
bool audio_component_registered; |
/** |
* av_mutex - mutex for audio/video sync |
* |
*/ |
struct mutex av_mutex; |
|
uint32_t hw_context_size; |
struct list_head context_list; |
|
u32 fdi_rx_config; |
|
u32 chv_phy_control; |
|
u32 suspend_count; |
struct i915_suspend_saved_registers regfile; |
struct vlv_s0ix_state vlv_s0ix_state; |
1732,41 → 1951,29 |
union { |
struct ilk_wm_values hw; |
struct skl_wm_values skl_hw; |
struct vlv_wm_values vlv; |
}; |
|
uint8_t max_level; |
} wm; |
|
struct i915_runtime_pm pm; |
|
struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS]; |
u32 long_hpd_port_mask; |
u32 short_hpd_port_mask; |
struct work_struct dig_port_work; |
|
/* |
* if we get a HPD irq from DP and a HPD irq from non-DP |
* the non-DP HPD could block the workqueue on a mode config |
* mutex getting, that userspace may have taken. However |
* userspace is waiting on the DP workqueue to run which is |
* blocked behind the non-DP one. |
*/ |
struct workqueue_struct *dp_wq; |
|
uint32_t bios_vgacntr; |
|
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ |
struct { |
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, |
struct intel_engine_cs *ring, |
struct intel_context *ctx, |
int (*execbuf_submit)(struct i915_execbuffer_params *params, |
struct drm_i915_gem_execbuffer2 *args, |
struct list_head *vmas, |
struct drm_i915_gem_object *batch_obj, |
u64 exec_start, u32 flags); |
struct list_head *vmas); |
int (*init_rings)(struct drm_device *dev); |
void (*cleanup_ring)(struct intel_engine_cs *ring); |
void (*stop_ring)(struct intel_engine_cs *ring); |
} gt; |
|
bool edp_low_vswing; |
|
/* perform PHY state sanity checks? */ |
bool chv_phy_assert[2]; |
|
/* |
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch |
* will be rejected. Instead look for a better place. |
1778,6 → 1985,16 |
return dev->dev_private; |
} |
|
/* Map a struct device back to its drm_i915_private.  The drm_device is
 * stored as the device's drvdata; to_i915() then yields the i915 private. */
static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return to_i915(drm_dev);
}
|
/* Recover the drm_i915_private that embeds @guc: the guc member lives
 * inside drm_i915_private, so container_of() walks back to the parent. */
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return container_of(guc, struct drm_i915_private, guc);
}
|
/* Iterate over initialised rings */ |
#define for_each_ring(ring__, dev_priv__, i__) \ |
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ |
1814,13 → 2031,14 |
|
/* |
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is |
* considered to be the frontbuffer for the given plane interface-vise. This |
* considered to be the frontbuffer for the given plane interface-wise. This |
* doesn't mean that the hw necessarily already scans it out, but that any |
* rendering (by the cpu or gpu) will land in the frontbuffer eventually. |
* |
* We have one bit per pipe and per scanout plane type. |
*/ |
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 |
#define INTEL_MAX_SPRITE_BITS_PER_PIPE 5 |
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 |
#define INTEL_FRONTBUFFER_BITS \ |
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) |
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \ |
1827,12 → 2045,12 |
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
#define INTEL_FRONTBUFFER_CURSOR(pipe) \ |
(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
#define INTEL_FRONTBUFFER_SPRITE(pipe) \ |
(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
#define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \ |
(1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ |
(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
(1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) |
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ |
(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
(0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) |
|
struct drm_i915_gem_object { |
struct drm_gem_object base; |
1846,16 → 2064,18 |
struct drm_mm_node *stolen; |
struct list_head global_list; |
|
struct list_head ring_list; |
struct list_head ring_list[I915_NUM_RINGS]; |
/** Used in execbuf to temporarily hold a ref */ |
struct list_head obj_exec_link; |
|
struct list_head batch_pool_link; |
|
/** |
* This is set if the object is on the active lists (has pending |
* rendering and so a non-zero seqno), and is not set if it i s on |
* inactive (ready to be unbound) list. |
*/ |
unsigned int active:1; |
unsigned int active:I915_NUM_RINGS; |
|
/** |
* This is set if the object has been written to since last bound |
1900,8 → 2120,6 |
* accurate mappable working set. |
*/ |
unsigned int fault_mappable:1; |
unsigned int pin_mappable:1; |
unsigned int pin_display:1; |
|
/* |
* Is the object to be mapped as read-only to the GPU |
1909,25 → 2127,37 |
*/ |
unsigned long gt_ro:1; |
unsigned int cache_level:3; |
unsigned int cache_dirty:1; |
|
unsigned int has_dma_mapping:1; |
|
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; |
|
unsigned int pin_display; |
|
struct sg_table *pages; |
int pages_pin_count; |
struct get_page { |
struct scatterlist *sg; |
int last; |
} get_page; |
|
/* prime dma-buf support */ |
void *dma_buf_vmapping; |
int vmapping_count; |
|
struct intel_engine_cs *ring; |
|
/** Breadcrumb of last rendering to the buffer. */ |
uint32_t last_read_seqno; |
uint32_t last_write_seqno; |
/** Breadcrumb of last rendering to the buffer. |
* There can only be one writer, but we allow for multiple readers. |
* If there is a writer that necessarily implies that all other |
* read requests are complete - but we may only be lazily clearing |
* the read requests. A read request is naturally the most recent |
* request on a ring, so we may have two different write and read |
* requests on one ring where the write request is older than the |
* read request. This allows for the CPU to read from an active |
* buffer by only waiting for the write to complete. |
* */ |
struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS]; |
struct drm_i915_gem_request *last_write_req; |
/** Breadcrumb of last fenced GPU access to the buffer. */ |
uint32_t last_fenced_seqno; |
struct drm_i915_gem_request *last_fenced_req; |
|
/** Current tiling stride for the object, if it's tiled. */ |
uint32_t stride; |
1938,10 → 2168,6 |
/** Record of address bit 17 of each page at last unbind. */ |
unsigned long *bit_17; |
|
/** User space pin count and filp owning the pin */ |
unsigned long user_pin_count; |
struct drm_file *pin_filp; |
|
union { |
/** for phy allocated objects */ |
struct drm_dma_handle *phys_handle; |
1970,27 → 2196,61 |
* The request queue allows us to note sequence numbers that have been emitted |
* and may be associated with active buffers to be retired. |
* |
* By keeping this list, we can avoid having to do questionable |
* sequence-number comparisons on buffer last_rendering_seqnos, and associate |
* an emission time with seqnos for tracking how far ahead of the GPU we are. |
* By keeping this list, we can avoid having to do questionable sequence |
* number comparisons on buffer last_read|write_seqno. It also allows an |
* emission time to be associated with the request for tracking how far ahead |
* of the GPU the submission is. |
* |
* The requests are reference counted, so upon creation they should have an |
* initial reference taken using kref_init |
*/ |
struct drm_i915_gem_request { |
struct kref ref; |
|
/** On Which ring this request was generated */ |
struct drm_i915_private *i915; |
struct intel_engine_cs *ring; |
|
/** GEM sequence number associated with this request. */ |
uint32_t seqno; |
/** GEM sequence number associated with the previous request, |
* when the HWS breadcrumb is equal to this the GPU is processing |
* this request. |
*/ |
u32 previous_seqno; |
|
/** GEM sequence number associated with this request, |
* when the HWS breadcrumb is equal or greater than this the GPU |
* has finished processing this request. |
*/ |
u32 seqno; |
|
/** Position in the ringbuffer of the start of the request */ |
u32 head; |
|
/** Position in the ringbuffer of the end of the request */ |
/** |
* Position in the ringbuffer of the start of the postfix. |
* This is required to calculate the maximum available ringbuffer |
* space without overwriting the postfix. |
*/ |
u32 postfix; |
|
/** Position in the ringbuffer of the end of the whole request */ |
u32 tail; |
|
/** Context related to this request */ |
/** |
* Context and ring buffer related to this request |
* Contexts are refcounted, so when this request is associated with a |
* context, we must increment the context's refcount, to guarantee that |
* it persists while any request is linked to it. Requests themselves |
* are also refcounted, so the request will only be freed when the last |
* reference to it is dismissed, and the code in |
* i915_gem_request_free() will then decrement the refcount on the |
* context. |
*/ |
struct intel_context *ctx; |
struct intel_ringbuffer *ringbuf; |
|
/** Batch buffer related to this request if any */ |
/** Batch buffer related to this request if any (used for |
error state dump only) */ |
struct drm_i915_gem_object *batch_obj; |
|
/** Time at which this request was emitted, in jiffies. */ |
2002,24 → 2262,98 |
struct drm_i915_file_private *file_priv; |
/** file_priv list entry for this request */ |
struct list_head client_list; |
}; |
|
struct drm_i915_file_private { |
struct drm_i915_private *dev_priv; |
struct drm_file *file; |
/** process identifier submitting this request */ |
struct pid *pid; |
|
struct { |
spinlock_t lock; |
struct list_head request_list; |
struct delayed_work idle_work; |
} mm; |
struct idr context_idr; |
/** |
* The ELSP only accepts two elements at a time, so we queue |
* context/tail pairs on a given queue (ring->execlist_queue) until the |
* hardware is available. The queue serves a double purpose: we also use |
* it to keep track of the up to 2 contexts currently in the hardware |
* (usually one in execution and the other queued up by the GPU): We |
* only remove elements from the head of the queue when the hardware |
* informs us that an element has been completed. |
* |
* All accesses to the queue are mediated by a spinlock |
* (ring->execlist_lock). |
*/ |
|
atomic_t rps_wait_boost; |
struct intel_engine_cs *bsd_ring; |
/** Execlist link in the submission queue.*/ |
struct list_head execlist_link; |
|
/** Execlists no. of times this request has been sent to the ELSP */ |
int elsp_submitted; |
|
}; |
|
int i915_gem_request_alloc(struct intel_engine_cs *ring, |
struct intel_context *ctx, |
struct drm_i915_gem_request **req_out); |
void i915_gem_request_cancel(struct drm_i915_gem_request *req); |
void i915_gem_request_free(struct kref *req_ref); |
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, |
struct drm_file *file); |
|
static inline uint32_t |
i915_gem_request_get_seqno(struct drm_i915_gem_request *req) |
{ |
return req ? req->seqno : 0; |
} |
|
static inline struct intel_engine_cs * |
i915_gem_request_get_ring(struct drm_i915_gem_request *req) |
{ |
return req ? req->ring : NULL; |
} |
|
static inline struct drm_i915_gem_request * |
i915_gem_request_reference(struct drm_i915_gem_request *req) |
{ |
if (req) |
kref_get(&req->ref); |
return req; |
} |
|
/* Drop a reference to @req; i915_gem_request_free() runs when the last
 * reference is released.  The caller must hold dev->struct_mutex, as
 * asserted by the WARN_ON below.  Note: unlike the __unlocked variant,
 * @req must not be NULL here (req->ring is dereferenced unconditionally).
 */
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
}
|
/* Drop a reference to @req without the caller holding struct_mutex.
 * NULL is a no-op.  kref_put_mutex() acquires dev->struct_mutex only
 * when this is the final reference (so i915_gem_request_free() runs
 * under the lock) and returns nonzero in that case, whereupon we must
 * release the mutex ourselves.
 */
static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;

	if (!req)
		return;

	dev = req->ring->dev;
	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
}
|
/* Replace *pdst with src, fixing up reference counts.  src is referenced
 * before the old *pdst is unreferenced, so the sequence stays correct
 * even when src == *pdst.  Either pointer may be NULL.  Requires the
 * same locking as i915_gem_request_unreference() when *pdst is non-NULL.
 */
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}
|
/* |
* XXX: i915_gem_request_completed should be here but currently needs the |
* definition of i915_seqno_passed() which is below. It will be moved in |
* a later patch when the call to i915_seqno_passed() is obsoleted... |
*/ |
|
/* |
* A command that requires special handling by the command parser. |
*/ |
struct drm_i915_cmd_descriptor { |
2071,10 → 2405,15 |
* Describes where to find a register address in the command to check |
* against the ring's register whitelist. Only valid if flags has the |
* CMD_DESC_REGISTER bit set. |
* |
* A non-zero step value implies that the command may access multiple |
* registers in sequence (e.g. LRI), in that case step gives the |
* distance in dwords between individual offset fields. |
*/ |
struct { |
u32 offset; |
u32 mask; |
u32 step; |
} reg; |
|
#define MAX_CMD_DESC_BITMASKS 3 |
2122,6 → 2461,7 |
}) |
#define INTEL_INFO(p) (&__I915__(p)->info) |
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) |
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) |
|
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) |
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) |
2144,21 → 2484,22 |
#define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ |
INTEL_DEVID(dev) == 0x0152 || \ |
INTEL_DEVID(dev) == 0x015a) |
#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \ |
INTEL_DEVID(dev) == 0x0106 || \ |
INTEL_DEVID(dev) == 0x010A) |
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) |
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) |
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) |
#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) |
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ |
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00) |
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ |
((INTEL_DEVID(dev) & 0xf) == 0x2 || \ |
(INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
((INTEL_DEVID(dev) & 0xf) == 0x6 || \ |
(INTEL_DEVID(dev) & 0xf) == 0xb || \ |
(INTEL_DEVID(dev) & 0xf) == 0xe)) |
/* ULX machines are also considered ULT. */ |
#define IS_BDW_ULX(dev) (IS_BROADWELL(dev) && \ |
(INTEL_DEVID(dev) & 0xf) == 0xe) |
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ |
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ |
2168,8 → 2509,32 |
/* ULX machines are also considered ULT. */ |
#define IS_HSW_ULX(dev) (INTEL_DEVID(dev) == 0x0A0E || \ |
INTEL_DEVID(dev) == 0x0A1E) |
#define IS_SKL_ULT(dev) (INTEL_DEVID(dev) == 0x1906 || \ |
INTEL_DEVID(dev) == 0x1913 || \ |
INTEL_DEVID(dev) == 0x1916 || \ |
INTEL_DEVID(dev) == 0x1921 || \ |
INTEL_DEVID(dev) == 0x1926) |
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ |
INTEL_DEVID(dev) == 0x1915 || \ |
INTEL_DEVID(dev) == 0x191E) |
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \ |
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) |
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \ |
(INTEL_DEVID(dev) & 0x00F0) == 0x0030) |
|
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) |
|
#define SKL_REVID_A0 (0x0) |
#define SKL_REVID_B0 (0x1) |
#define SKL_REVID_C0 (0x2) |
#define SKL_REVID_D0 (0x3) |
#define SKL_REVID_E0 (0x4) |
#define SKL_REVID_F0 (0x5) |
|
#define BXT_REVID_A0 (0x0) |
#define BXT_REVID_B0 (0x3) |
#define BXT_REVID_C0 (0x9) |
|
/* |
* The genX designation typically refers to the render engine, so render |
* capability related checks should use IS_GEN, while display and other checks |
2202,7 → 2567,8 |
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) |
#define USES_PPGTT(dev) (i915.enable_ppgtt) |
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) |
#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt >= 2) |
#define USES_FULL_48BIT_PPGTT(dev) (i915.enable_ppgtt == 3) |
|
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
2223,9 → 2589,6 |
*/ |
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ |
IS_I915GM(dev))) |
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) |
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) |
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) |
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) |
|
2235,14 → 2598,31 |
|
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev)) |
|
#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ |
INTEL_INFO(dev)->gen >= 9) |
|
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ |
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ |
IS_SKYLAKE(dev)) |
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ |
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) |
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ |
IS_SKYLAKE(dev)) |
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) |
|
#define HAS_CSR(dev) (IS_GEN9(dev)) |
|
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) |
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) |
|
#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ |
INTEL_INFO(dev)->gen >= 8) |
|
#define HAS_CORE_RING_FREQ(dev) (INTEL_INFO(dev)->gen >= 6 && \ |
!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) |
|
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
2251,10 → 2631,12 |
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 |
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 |
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 |
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 |
|
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) |
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) |
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) |
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) |
2267,20 → 2649,20 |
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) |
|
#define GT_FREQUENCY_MULTIPLIER 50 |
#define GEN9_FREQ_SCALER 3 |
|
#include "i915_trace.h" |
|
extern const struct drm_ioctl_desc i915_ioctls[]; |
extern int i915_max_ioctl; |
|
extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
extern int i915_resume_switcheroo(struct drm_device *dev); |
|
/* i915_params.c */ |
struct i915_params { |
int modeset; |
int panel_ignore_lid; |
unsigned int powersave; |
int semaphores; |
unsigned int lvds_downclock; |
int lvds_channel_mode; |
int panel_use_ssc; |
int vbt_sdvo_panel_type; |
2298,11 → 2680,17 |
bool enable_hangcheck; |
bool fastboot; |
bool prefault_disable; |
bool load_detect_test; |
bool reset; |
bool disable_display; |
bool disable_vtd_wa; |
bool enable_guc_submission; |
int guc_log_level; |
int use_mmio_flip; |
bool mmio_debug; |
int mmio_debug; |
bool verbose_state_checks; |
bool nuclear_pageflip; |
int edp_vswing; |
}; |
extern struct i915_params i915 __read_mostly; |
|
2315,12 → 2703,12 |
struct drm_file *file); |
extern void i915_driver_postclose(struct drm_device *dev, |
struct drm_file *file); |
extern int i915_driver_device_is_agp(struct drm_device * dev); |
#ifdef CONFIG_COMPAT |
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
unsigned long arg); |
#endif |
extern int intel_gpu_reset(struct drm_device *dev); |
extern bool intel_has_gpu_reset(struct drm_device *dev); |
extern int i915_reset(struct drm_device *dev); |
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
2327,7 → 2715,14 |
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); |
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); |
void i915_firmware_load_error_print(const char *fw_path, int err); |
|
/* intel_hotplug.c */ |
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); |
void intel_hpd_init(struct drm_i915_private *dev_priv); |
void intel_hpd_init_work(struct drm_i915_private *dev_priv); |
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); |
bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); |
|
/* i915_irq.c */ |
void i915_queue_hangcheck(struct drm_device *dev); |
2336,7 → 2731,6 |
const char *fmt, ...); |
|
extern void intel_irq_init(struct drm_i915_private *dev_priv); |
extern void intel_hpd_init(struct drm_i915_private *dev_priv); |
int intel_irq_install(struct drm_i915_private *dev_priv); |
void intel_irq_uninstall(struct drm_i915_private *dev_priv); |
|
2347,6 → 2741,23 |
extern void intel_uncore_check_errors(struct drm_device *dev); |
extern void intel_uncore_fini(struct drm_device *dev); |
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); |
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); |
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, |
enum forcewake_domains domains); |
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, |
enum forcewake_domains domains); |
/* Like above but the caller must manage the uncore.lock itself. |
* Must be used with I915_READ_FW and friends. |
*/ |
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, |
enum forcewake_domains domains); |
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, |
enum forcewake_domains domains); |
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); |
static inline bool intel_vgpu_active(struct drm_device *dev) |
{ |
return to_i915(dev)->vgpu.active; |
} |
|
void |
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, |
2358,6 → 2769,9 |
|
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); |
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); |
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, |
uint32_t mask, |
uint32_t bits); |
void |
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); |
void |
2386,27 → 2800,15 |
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
struct intel_engine_cs *ring); |
void i915_gem_execbuffer_retire_commands(struct drm_device *dev, |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct drm_i915_gem_object *obj); |
int i915_gem_ringbuffer_submission(struct drm_device *dev, |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct intel_context *ctx, |
struct drm_i915_gem_request *req); |
void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params); |
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, |
struct drm_i915_gem_execbuffer2 *args, |
struct list_head *vmas, |
struct drm_i915_gem_object *batch_obj, |
u64 exec_start, u32 flags); |
struct list_head *vmas); |
int i915_gem_execbuffer(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_pin_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
2429,12 → 2831,6 |
int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_load(struct drm_device *dev); |
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, |
long target, |
unsigned flags); |
#define I915_SHRINK_PURGEABLE 0x1 |
#define I915_SHRINK_UNBOUND 0x2 |
#define I915_SHRINK_BOUND 0x4 |
void *i915_gem_object_alloc(struct drm_device *dev); |
void i915_gem_object_free(struct drm_i915_gem_object *obj); |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
2441,21 → 2837,41 |
const struct drm_i915_gem_object_ops *ops); |
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
size_t size); |
void i915_init_vm(struct drm_i915_private *dev_priv, |
struct i915_address_space *vm); |
struct drm_i915_gem_object *i915_gem_object_create_from_data( |
struct drm_device *dev, const void *data, size_t size); |
void i915_gem_free_object(struct drm_gem_object *obj); |
void i915_gem_vma_destroy(struct i915_vma *vma); |
|
#define PIN_MAPPABLE 0x1 |
#define PIN_NONBLOCK 0x2 |
#define PIN_GLOBAL 0x4 |
#define PIN_OFFSET_BIAS 0x8 |
/* Flags used by pin/bind&friends. */ |
#define PIN_MAPPABLE (1<<0) |
#define PIN_NONBLOCK (1<<1) |
#define PIN_GLOBAL (1<<2) |
#define PIN_OFFSET_BIAS (1<<3) |
#define PIN_USER (1<<4) |
#define PIN_UPDATE (1<<5) |
#define PIN_ZONE_4G (1<<6) |
#define PIN_HIGH (1<<7) |
#define PIN_OFFSET_MASK (~4095) |
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
int __must_check |
i915_gem_object_pin(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm, |
uint32_t alignment, |
uint64_t flags); |
int __must_check |
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, |
const struct i915_ggtt_view *view, |
uint32_t alignment, |
uint64_t flags); |
|
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, |
u32 flags); |
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); |
int __must_check i915_vma_unbind(struct i915_vma *vma); |
/* |
* BEWARE: Do not use the function below unless you can _absolutely_ |
* _guarantee_ VMA in question is _not in use_ anywhere. |
*/ |
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); |
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); |
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
2464,15 → 2880,32 |
int *needs_clflush); |
|
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
|
static inline int __sg_page_count(struct scatterlist *sg) |
{ |
struct sg_page_iter sg_iter; |
return sg->length >> PAGE_SHIFT; |
} |
|
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n) |
return sg_page_iter_page(&sg_iter); |
static inline struct page * |
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
{ |
if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT)) |
return NULL; |
|
return NULL; |
if (n < obj->get_page.last) { |
obj->get_page.sg = obj->pages->sgl; |
obj->get_page.last = 0; |
} |
|
while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) { |
obj->get_page.last += __sg_page_count(obj->get_page.sg++); |
if (unlikely(sg_is_chain(obj->get_page.sg))) |
obj->get_page.sg = sg_chain_ptr(obj->get_page.sg); |
} |
|
return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last); |
} |
|
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) |
{ |
BUG_ON(obj->pages == NULL); |
2486,9 → 2919,10 |
|
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
struct intel_engine_cs *to); |
struct intel_engine_cs *to, |
struct drm_i915_gem_request **to_req); |
void i915_vma_move_to_active(struct i915_vma *vma, |
struct intel_engine_cs *ring); |
struct drm_i915_gem_request *req); |
int i915_gem_dumb_create(struct drm_file *file_priv, |
struct drm_device *dev, |
struct drm_mode_create_dumb *args); |
2503,14 → 2937,23 |
return (int32_t)(seq1 - seq2) >= 0; |
} |
|
/* Has the GPU begun processing @req?  True once the engine's HWS
 * breadcrumb has reached req->previous_seqno (the seqno of the request
 * submitted immediately before this one).  @lazy_coherency is forwarded
 * to the engine's get_seqno() vfunc. */
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
					   bool lazy_coherency)
{
	struct intel_engine_cs *ring = req->ring;
	u32 hws_seqno = ring->get_seqno(ring, lazy_coherency);

	return i915_seqno_passed(hws_seqno, req->previous_seqno);
}
|
/* Has the GPU finished processing @req?  True once the engine's HWS
 * breadcrumb is equal to or ahead of req->seqno.  @lazy_coherency is
 * forwarded to the engine's get_seqno() vfunc. */
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	struct intel_engine_cs *ring = req->ring;
	u32 hws_seqno = ring->get_seqno(ring, lazy_coherency);

	return i915_seqno_passed(hws_seqno, req->seqno);
}
|
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
|
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); |
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); |
|
struct drm_i915_gem_request * |
i915_gem_find_active_request(struct intel_engine_cs *ring); |
|
2518,7 → 2961,6 |
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); |
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
bool interruptible); |
int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); |
|
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
{ |
2550,30 → 2992,32 |
|
void i915_gem_reset(struct drm_device *dev); |
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_init(struct drm_device *dev); |
int i915_gem_init_rings(struct drm_device *dev); |
int __must_check i915_gem_init_hw(struct drm_device *dev); |
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice); |
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); |
void i915_gem_init_swizzling(struct drm_device *dev); |
void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
int __must_check i915_gpu_idle(struct drm_device *dev); |
int __must_check i915_gem_suspend(struct drm_device *dev); |
int __i915_add_request(struct intel_engine_cs *ring, |
struct drm_file *file, |
void __i915_add_request(struct drm_i915_gem_request *req, |
struct drm_i915_gem_object *batch_obj, |
u32 *seqno); |
#define i915_add_request(ring, seqno) \ |
__i915_add_request(ring, NULL, NULL, seqno) |
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, |
bool flush_caches); |
#define i915_add_request(req) \ |
__i915_add_request(req, NULL, true) |
#define i915_add_request_no_flush(req) \ |
__i915_add_request(req, NULL, false) |
int __i915_wait_request(struct drm_i915_gem_request *req, |
unsigned reset_counter, |
bool interruptible, |
s64 *timeout, |
struct drm_i915_file_private *file_priv); |
int __must_check i915_wait_seqno(struct intel_engine_cs *ring, |
uint32_t seqno); |
struct intel_rps_client *rps); |
int __must_check i915_wait_request(struct drm_i915_gem_request *req); |
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
int __must_check |
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
bool readonly); |
int __must_check |
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
bool write); |
int __must_check |
2581,8 → 3025,11 |
int __must_check |
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
u32 alignment, |
struct intel_engine_cs *pipelined); |
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); |
struct intel_engine_cs *pipelined, |
struct drm_i915_gem_request **pipelined_request, |
const struct i915_ggtt_view *view); |
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, |
const struct i915_ggtt_view *view); |
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, |
int align); |
int i915_gem_open(struct drm_device *dev, struct drm_file *file); |
2603,29 → 3050,44 |
struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
struct drm_gem_object *gem_obj, int flags); |
|
void i915_gem_restore_fences(struct drm_device *dev); |
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, |
const struct i915_ggtt_view *view); |
u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, |
struct i915_address_space *vm); |
static inline u64 |
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) |
{ |
return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); |
} |
|
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, |
struct i915_address_space *vm); |
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); |
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, |
const struct i915_ggtt_view *view); |
bool i915_gem_obj_bound(struct drm_i915_gem_object *o, |
struct i915_address_space *vm); |
|
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, |
struct i915_address_space *vm); |
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
struct i915_vma * |
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm); |
struct i915_vma * |
i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, |
const struct i915_ggtt_view *view); |
|
struct i915_vma * |
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm); |
struct i915_vma * |
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, |
const struct i915_ggtt_view *view); |
|
struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); |
static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { |
struct i915_vma *vma; |
list_for_each_entry(vma, &obj->vma_list, vma_link) |
if (vma->pin_count > 0) |
return true; |
return false; |
static inline struct i915_vma * |
i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) |
{ |
return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); |
} |
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); |
|
/* Some GGTT VM helpers */ |
#define i915_obj_to_ggtt(obj) \ |
2648,16 → 3110,10 |
|
static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) |
{ |
return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj)); |
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); |
} |
|
/* NOTE(review): removed a stale pre-merge copy of
 * i915_gem_obj_ggtt_offset() (unsigned long return type) that redefined
 * the symbol; the current u64 version, implemented via
 * i915_gem_obj_ggtt_offset_view(), is defined earlier in this header. */
|
static inline unsigned long |
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) |
{ |
return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); |
2678,17 → 3134,35 |
return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); |
} |
|
void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); |
void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, |
const struct i915_ggtt_view *view); |
static inline void |
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) |
{ |
i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); |
} |
|
/* i915_gem_fence.c */ |
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
|
bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj); |
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj); |
|
void i915_gem_restore_fences(struct drm_device *dev); |
|
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
|
/* i915_gem_context.c */ |
int __must_check i915_gem_context_init(struct drm_device *dev); |
void i915_gem_context_fini(struct drm_device *dev); |
void i915_gem_context_reset(struct drm_device *dev); |
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); |
int i915_gem_context_enable(struct drm_i915_private *dev_priv); |
int i915_gem_context_enable(struct drm_i915_gem_request *req); |
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
int i915_switch_context(struct intel_engine_cs *ring, |
struct intel_context *to); |
int i915_switch_context(struct drm_i915_gem_request *req); |
struct intel_context * |
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id); |
void i915_gem_context_free(struct kref *ctx_ref); |
2713,6 → 3187,10 |
struct drm_file *file); |
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
|
/* i915_gem_evict.c */ |
int __must_check i915_gem_evict_something(struct drm_device *dev, |
2724,7 → 3202,6 |
unsigned long end, |
unsigned flags); |
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); |
int i915_gem_evict_everything(struct drm_device *dev); |
|
/* belongs in i915_gem_gtt.h */ |
static inline void i915_gem_chipset_flush(struct drm_device *dev) |
2734,9 → 3211,16 |
} |
|
/* i915_gem_stolen.c */ |
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, |
struct drm_mm_node *node, u64 size, |
unsigned alignment); |
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, |
struct drm_mm_node *node, u64 size, |
unsigned alignment, u64 start, |
u64 end); |
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, |
struct drm_mm_node *node); |
int i915_gem_init_stolen(struct drm_device *dev); |
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp); |
void i915_gem_stolen_cleanup_compression(struct drm_device *dev); |
void i915_gem_cleanup_stolen(struct drm_device *dev); |
struct drm_i915_gem_object * |
i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
2746,6 → 3230,18 |
u32 gtt_offset, |
u32 size); |
|
/* i915_gem_shrinker.c */ |
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, |
unsigned long target, |
unsigned flags); |
#define I915_SHRINK_PURGEABLE 0x1 |
#define I915_SHRINK_UNBOUND 0x2 |
#define I915_SHRINK_BOUND 0x4 |
#define I915_SHRINK_ACTIVE 0x8 |
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); |
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); |
|
|
/* i915_gem_tiling.c */ |
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
{ |
2755,10 → 3251,6 |
obj->tiling_mode != I915_TILING_NONE; |
} |
|
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
|
/* i915_gem_debug.c */ |
#if WATCH_LISTS |
int i915_verify_lists(struct drm_device *dev); |
2770,8 → 3262,11 |
int i915_debugfs_init(struct drm_minor *minor); |
void i915_debugfs_cleanup(struct drm_minor *minor); |
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
/* No-op stubs when debugfs support is compiled out: connector registration
 * "succeeds" and CRC-capture init does nothing. */
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
|
2805,7 → 3300,9 |
bool i915_needs_cmd_parser(struct intel_engine_cs *ring); |
int i915_parse_cmds(struct intel_engine_cs *ring, |
struct drm_i915_gem_object *batch_obj, |
struct drm_i915_gem_object *shadow_batch_obj, |
u32 batch_start_offset, |
u32 batch_len, |
bool is_master); |
|
/* i915_suspend.c */ |
2812,10 → 3309,6 |
extern int i915_save_state(struct drm_device *dev); |
extern int i915_restore_state(struct drm_device *dev); |
|
/* i915_ums.c */ |
void i915_save_display_reg(struct drm_device *dev); |
void i915_restore_display_reg(struct drm_device *dev); |
|
/* i915_sysfs.c */ |
void i915_setup_sysfs(struct drm_device *dev_priv); |
void i915_teardown_sysfs(struct drm_device *dev_priv); |
2823,13 → 3316,11 |
/* intel_i2c.c */ |
extern int intel_setup_gmbus(struct drm_device *dev); |
extern void intel_teardown_gmbus(struct drm_device *dev); |
/* True if @port is in the valid GMBUS port range (SSC..DPD inclusive).
 * NOTE(review): pre-merge helper — the updated API declared below uses
 * intel_gmbus_is_valid_pin() instead; confirm no callers remain before
 * deleting this. */
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, |
unsigned int pin); |
|
extern struct i2c_adapter *intel_gmbus_get_adapter( |
struct drm_i915_private *dev_priv, unsigned port); |
extern struct i2c_adapter * |
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); |
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
2881,17 → 3372,12 |
extern void intel_modeset_cleanup(struct drm_device *dev); |
extern void intel_connector_unregister(struct intel_connector *); |
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
bool force_restore); |
extern void intel_display_resume(struct drm_device *dev); |
extern void i915_redisable_vga(struct drm_device *dev); |
extern void i915_redisable_vga_power_on(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
extern void intel_init_pch_refclk(struct drm_device *dev); |
extern void gen6_set_rps(struct drm_device *dev, u8 val); |
extern void valleyview_set_rps(struct drm_device *dev, u8 val); |
extern void intel_set_rps(struct drm_device *dev, u8 val); |
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, |
bool enable); |
extern void intel_detect_pch(struct drm_device *dev); |
2904,10 → 3390,7 |
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
|
void intel_notify_mmio_flip(struct intel_engine_cs *ring); |
|
/* overlay */ |
#ifdef CONFIG_DEBUG_FS |
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); |
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, |
struct intel_overlay_error_state *error); |
2916,22 → 3399,13 |
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, |
struct drm_device *dev, |
struct intel_display_error_state *error); |
#endif |
|
/* On SNB platform, before reading ring registers forcewake bit |
* must be set to prevent GT core from power down and stale values being |
* returned. |
*/ |
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine); |
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); |
void assert_force_wake_inactive(struct drm_i915_private *dev_priv); |
|
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val); |
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val); |
|
/* intel_sideband.c */ |
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); |
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); |
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); |
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); |
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); |
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); |
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
2952,16 → 3426,9 |
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); |
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); |
|
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val); |
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val); |
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); |
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); |
|
#define FORCEWAKE_RENDER (1 << 0) |
#define FORCEWAKE_MEDIA (1 << 1) |
#define FORCEWAKE_BLITTER (1 << 2) |
#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \ |
FORCEWAKE_BLITTER) |
|
|
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) |
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) |
|
2985,19 → 3452,29 |
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) |
|
/*
 * Read a 64-bit value exposed as two 32-bit registers: re-read the upper
 * half until it is stable across the lower-half read (the hardware counter
 * may carry between the two reads).  The retry is bounded so a wedged GPU
 * returning junk cannot hang us here.
 * NOTE(review): reconstructed from merge residue that interleaved the old
 * single-retry variant with this bounded-loop variant.
 */
#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
	u32 upper, lower, old_upper, loop = 0;				\
	upper = I915_READ(upper_reg);					\
	do {								\
		old_upper = upper;					\
		lower = I915_READ(lower_reg);				\
		upper = I915_READ(upper_reg);				\
	} while (upper != old_upper && loop++ < 2);			\
	(u64)upper << 32 | lower; })
|
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
|
/* These are untraced mmio-accessors that are only valid to be used inside |
* criticial sections inside IRQ handlers where forcewake is explicitly |
* controlled. |
* Think twice, and think again, before using these. |
* Note: Should only be used between intel_uncore_forcewake_irqlock() and |
* intel_uncore_forcewake_irqunlock(). |
*/ |
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) |
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) |
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) |
|
/* "Broadcast RGB" property */ |
#define INTEL_BROADCAST_RGB_AUTO 0 |
#define INTEL_BROADCAST_RGB_FULL 1 |
3059,74 → 3536,15 |
|
if (time_after(target_jiffies, tmp_jiffies)) { |
remaining_jiffies = target_jiffies - tmp_jiffies; |
while ((int)remaining_jiffies > 0) { |
delay(remaining_jiffies); |
remaining_jiffies = target_jiffies - jiffies; |
} |
} |
} |
|
typedef struct |
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, |
struct drm_i915_gem_request *req) |
{ |
int width; |
int height; |
int bpp; |
int freq; |
}videomode_t; |
|
/* One recognized command-line option: name, precomputed name length, and
 * the integer variable the parsed value is written to. */
struct cmdtable
{
	char *key;	/* option name */
	int size;	/* strlen(key), precomputed by CMDENTRY */
	int *val;	/* destination for the parsed value */
};

/* Build a cmdtable initializer from a string literal and an int lvalue;
 * sizeof(key)-1 is the literal's length without the NUL. */
#define CMDENTRY(key, val) {(key), (sizeof(key)-1), &val}

/* Parse @cmdline against @table; also extracts log path and video mode. */
void parse_cmdline(char *cmdline, struct cmdtable *table, char *log, videomode_t *mode);
/* Wrap a fixed GTT range at @gtt_offset in a GEM object for the boot FB. */
struct drm_i915_gem_object
*kos_gem_fb_object_create(struct drm_device *dev, u32 gtt_offset, u32 size);
|
extern struct drm_i915_gem_object *main_fb_obj;

/*
 * Return the GEM object backing the primary (boot) framebuffer.
 * NOTE(review): duplicate of the identical helper near the top of this
 * header (merge residue) — one copy should be removed.  Also fixed the
 * empty (non-prototype) parameter list and the stray ';' after the body.
 */
static inline struct drm_i915_gem_object *get_fb_obj(void)
{
	return main_fb_obj;
}

#define ioread32(addr) readl(addr)
|
|
/*
 * Runtime-PM stub for this port: there is no runtime power management, so
 * report the device as already active (0 = success).
 * NOTE(review): removed two lines of i915_trace_irq_get() that a bad merge
 * had spliced in after the return — they referenced undeclared 'ring'/'req'
 * and were unreachable anyway.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return 0;
}
|
/* Runtime-PM stub: marking the device active always succeeds in this port. */
static inline int pm_runtime_set_active(struct device *dev)
{
	(void)dev;	/* no runtime PM here */
	return 0;
}
|
/* Runtime-PM stub: nothing to disable in this port. */
static inline void pm_runtime_disable(struct device *dev)
{
	(void)dev;	/* no runtime PM here */
}
|
/* Runtime-PM stub: dropping the usage count always succeeds in this port. */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	(void)dev;	/* no runtime PM here */
	return 0;
}
|
/* Read one byte from x86 I/O port @port.
 * NOTE(review): duplicate of the identical definition near the top of this
 * header (merge residue); defining it twice in one translation unit will
 * not compile — one copy should be removed. */
static inline u8 inb(u16 port)
{
	u8 v;
	asm volatile("inb %1,%0" : "=a" (v) : "dN" (port));
	return v;
}
|
/* Write byte @v to x86 I/O port @port.
 * NOTE(review): duplicate of the identical definition near the top of this
 * header (merge residue) — one copy should be removed. */
static inline void outb(u8 v, u16 port)
{
	asm volatile("outb %0,%1" : : "a" (v), "dN" (port));
}
|
#endif |