35,6 → 35,7 |
#include "intel_ringbuffer.h" |
//#include <linux/io-mapping.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include <drm/intel-gtt.h> |
//#include <linux/backlight.h> |
|
46,6 → 47,13 |
|
#define I915_TILING_NONE 0 |
|
/* VGA arbiter resource flags.
 * NOTE(review): these mirror the VGA_RSRC_* values from <linux/vgaarb.h>;
 * presumably local fallback definitions because that header is not included
 * here — confirm they stay in sync with the canonical header. */
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
#define VGA_RSRC_LEGACY_MEM 0x02
/* Either legacy resource implies the full legacy range. */
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO 0x04
#define VGA_RSRC_NORMAL_MEM 0x08
|
#define DRIVER_AUTHOR "Tungsten Graphics, Inc." |
|
68,10 → 76,34 |
}; |
#define plane_name(p) ((p) + 'A') |
|
/* Display port identifiers.  Values are consecutive from 0 so that
 * port_name() below can map each port to its letter ('A'..'E'). */
enum port {
PORT_A = 0,
PORT_B,
PORT_C,
PORT_D,
PORT_E,
I915_MAX_PORTS
};
/* Printable letter for a port: PORT_A -> 'A', etc. */
#define port_name(p) ((p) + 'A')
|
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
|
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) |
|
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ |
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ |
if ((intel_encoder)->base.crtc == (__crtc)) |
|
/* Book-keeping for a PCH PLL that can be shared between several CRTCs. */
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
/* MMIO register offsets for this PLL */
int pll_reg;
int fp0_reg;
int fp1_reg;
};
/* Number of PCH PLL slots tracked (see dev_priv->pch_plls[]). */
#define I915_NUM_PLLS 2
|
/* Interface history: |
* |
* 1.1: Original. |
88,6 → 120,7 |
|
#define WATCH_COHERENCY 0 |
#define WATCH_LISTS 0 |
#define WATCH_GTT 0 |
|
#define I915_GEM_PHYS_CURSOR_0 1 |
#define I915_GEM_PHYS_CURSOR_1 2 |
109,11 → 142,11 |
struct drm_i915_private; |
|
struct intel_opregion { |
struct opregion_header *header; |
struct opregion_acpi *acpi; |
struct opregion_swsci *swsci; |
struct opregion_asle *asle; |
void *vbt; |
struct opregion_header __iomem *header; |
struct opregion_acpi __iomem *acpi; |
struct opregion_swsci __iomem *swsci; |
struct opregion_asle __iomem *asle; |
void __iomem *vbt; |
u32 __iomem *lid_state; |
}; |
#define OPREGION_SIZE (8*1024) |
133,7 → 166,7 |
/* Per-fence-register tracking state.
 * NOTE(review): this view shows both setup_seqno and pin_count; in the
 * underlying diff setup_seqno appears to be the removed field and
 * pin_count its replacement — confirm against the applied tree. */
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
int pin_count;
};
|
struct sdvo_device_mapping { |
150,38 → 183,50 |
struct drm_i915_error_state { |
u32 eir; |
u32 pgtbl_er; |
u32 ier; |
u32 ccid; |
bool waiting[I915_NUM_RINGS]; |
u32 pipestat[I915_MAX_PIPES]; |
u32 ipeir; |
u32 ipehr; |
u32 instdone; |
u32 acthd; |
u32 tail[I915_NUM_RINGS]; |
u32 head[I915_NUM_RINGS]; |
u32 ipeir[I915_NUM_RINGS]; |
u32 ipehr[I915_NUM_RINGS]; |
u32 instdone[I915_NUM_RINGS]; |
u32 acthd[I915_NUM_RINGS]; |
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; |
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ |
/* our own tracking of ring head and tail */ |
u32 cpu_ring_head[I915_NUM_RINGS]; |
u32 cpu_ring_tail[I915_NUM_RINGS]; |
u32 error; /* gen6+ */ |
u32 bcs_acthd; /* gen6+ blt engine */ |
u32 bcs_ipehr; |
u32 bcs_ipeir; |
u32 bcs_instdone; |
u32 bcs_seqno; |
u32 vcs_acthd; /* gen6+ bsd engine */ |
u32 vcs_ipehr; |
u32 vcs_ipeir; |
u32 vcs_instdone; |
u32 vcs_seqno; |
u32 instpm; |
u32 instps; |
u32 instdone1; |
u32 seqno; |
u32 err_int; /* gen7 */ |
u32 instpm[I915_NUM_RINGS]; |
u32 instps[I915_NUM_RINGS]; |
u32 extra_instdone[I915_NUM_INSTDONE_REG]; |
u32 seqno[I915_NUM_RINGS]; |
u64 bbaddr; |
u32 fault_reg[I915_NUM_RINGS]; |
u32 done_reg; |
u32 faddr[I915_NUM_RINGS]; |
u64 fence[I915_MAX_NUM_FENCES]; |
struct timeval time; |
struct drm_i915_error_ring { |
struct drm_i915_error_object { |
int page_count; |
u32 gtt_offset; |
u32 *pages[0]; |
} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS]; |
} *ringbuffer, *batchbuffer; |
struct drm_i915_error_request { |
long jiffies; |
u32 seqno; |
u32 tail; |
} *requests; |
int num_requests; |
} ring[I915_NUM_RINGS]; |
struct drm_i915_error_buffer { |
u32 size; |
u32 name; |
u32 seqno; |
u32 rseqno, wseqno; |
u32 gtt_offset; |
u32 read_domains; |
u32 write_domain; |
190,7 → 235,7 |
u32 tiling:2; |
u32 dirty:1; |
u32 purgeable:1; |
u32 ring:4; |
s32 ring:4; |
u32 cache_level:2; |
} *active_bo, *pinned_bo; |
u32 active_bo_count, pinned_bo_count; |
199,7 → 244,6 |
}; |
|
struct drm_i915_display_funcs { |
void (*dpms)(struct drm_crtc *crtc, int mode); |
bool (*fbc_enabled)(struct drm_device *dev); |
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); |
void (*disable_fbc)(struct drm_device *dev); |
208,11 → 252,16 |
void (*update_wm)(struct drm_device *dev); |
void (*update_sprite_wm)(struct drm_device *dev, int pipe, |
uint32_t sprite_width, int pixel_size); |
void (*update_linetime_wm)(struct drm_device *dev, int pipe, |
struct drm_display_mode *mode); |
int (*crtc_mode_set)(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
int x, int y, |
struct drm_framebuffer *old_fb); |
void (*crtc_enable)(struct drm_crtc *crtc); |
void (*crtc_disable)(struct drm_crtc *crtc); |
void (*off)(struct drm_crtc *crtc); |
void (*write_eld)(struct drm_connector *connector, |
struct drm_crtc *crtc); |
void (*fdi_link_train)(struct drm_crtc *crtc); |
223,8 → 272,6 |
struct drm_i915_gem_object *obj); |
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
int x, int y); |
void (*force_wake_get)(struct drm_i915_private *dev_priv); |
void (*force_wake_put)(struct drm_i915_private *dev_priv); |
/* clock updates for mode set */ |
/* cursor updates */ |
/* render clock increase/decrease */ |
232,6 → 279,37 |
/* pll clock increase/decrease */ |
}; |
|
/* Paired acquire/release hooks for GT forcewake; presumably chosen per
 * platform at init time (see dev_priv->gt) — confirm at the install site. */
struct drm_i915_gt_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
|
/* X-macro list of the boolean feature bits in struct intel_device_info.
 * The expansion site defines DEV_INFO_FLAG(name) and DEV_INFO_SEP before
 * using this, e.g. to stringify or print every flag in one place; keep it
 * in sync with the bitfields in struct intel_device_info below. */
#define DEV_INFO_FLAGS \
DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_llc)
|
struct intel_device_info { |
u8 gen; |
u8 is_mobile:1; |
245,6 → 323,9 |
u8 is_broadwater:1; |
u8 is_crestline:1; |
u8 is_ivybridge:1; |
u8 is_valleyview:1; |
u8 has_force_wake:1; |
u8 is_haswell:1; |
u8 has_fbc:1; |
u8 has_pipe_cxsr:1; |
u8 has_hotplug:1; |
254,8 → 335,30 |
u8 supports_tv:1; |
u8 has_bsd_ring:1; |
u8 has_blt_ring:1; |
u8 has_llc:1; |
}; |
|
/* Sizing of the per-process GTT page directory / page tables. */
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
/* State for a hardware PPGTT (per-process GTT). */
struct i915_hw_ppgtt {
unsigned num_pd_entries; /* number of valid page-directory entries */
dma_addr_t *pt_pages; /* NOTE(review): one entry per page table — confirm */
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
};
|
|
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
/* A logical hardware context and the GEM object backing its state. */
struct i915_hw_context {
int id; /* presumably the handle in file_priv->context_idr — confirm */
bool is_initialized;
struct drm_i915_file_private *file_priv; /* owner; NULL-ness semantics TBD */
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj; /* backing storage for the context image */
};
|
enum no_fbc_reason { |
FBC_NO_OUTPUT, /* no outputs enabled to compress */ |
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ |
268,25 → 371,38 |
}; |
|
/* Which PCH (platform controller hub) variant is paired with the GPU. */
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
};
|
#define QUIRK_PIPEA_FORCE (1<<0) |
#define QUIRK_LVDS_SSC_DISABLE (1<<1) |
#define QUIRK_INVERT_BRIGHTNESS (1<<2) |
|
struct intel_fbdev; |
struct intel_fbc_work; |
|
/* Per-port GMBUS (i2c) controller state, embedded in dev_priv->gmbus[]. */
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit; /* presumably: bypass hw engine and bit-bang via bit_algo — confirm */
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv; /* back-pointer to owning device */
};
|
typedef struct drm_i915_private { |
struct drm_device *dev; |
|
const struct intel_device_info *info; |
|
int has_gem; |
int relative_constants_mode; |
|
void __iomem *regs; |
|
struct drm_i915_gt_funcs gt; |
/** gt_fifo_count and the subsequent register write are synchronized |
* with dev->struct_mutex. */ |
unsigned gt_fifo_count; |
295,12 → 411,17 |
/** gt_lock is also taken in irq contexts. */ |
spinlock_t gt_lock; |
|
struct intel_gmbus { |
struct i2c_adapter adapter; |
struct i2c_adapter *force_bit; |
u32 reg0; |
} *gmbus; |
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
|
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
|
/** |
* Base address of the gmbus and gpio block. |
*/ |
uint32_t gpio_mmio_base; |
|
struct pci_dev *bridge_dev; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t next_seqno; |
307,22 → 428,19 |
|
drm_dma_handle_t *status_page_dmah; |
uint32_t counter; |
drm_local_map_t hws_map; |
struct drm_i915_gem_object *pwrctx; |
struct drm_i915_gem_object *renderctx; |
|
// struct resource mch_res; |
|
unsigned int cpp; |
int back_offset; |
int front_offset; |
int current_page; |
int page_flipping; |
|
atomic_t irq_received; |
|
/* protects the irq masks */ |
spinlock_t irq_lock; |
|
/* DPIO indirect register protection */ |
spinlock_t dpio_lock; |
|
/** Cached value of IMR to avoid reads in updating the bitfield */ |
u32 pipestat[2]; |
u32 irq_mask; |
332,23 → 450,18 |
u32 hotplug_supported_mask; |
struct work_struct hotplug_work; |
|
int tex_lru_log_granularity; |
int allow_batchbuffer; |
struct mem_block *agp_heap; |
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds; |
int vblank_pipe; |
int num_pipe; |
int num_pch_pll; |
|
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd; |
uint32_t last_acthd_bsd; |
uint32_t last_acthd_blt; |
uint32_t last_instdone; |
uint32_t last_instdone1; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
|
unsigned int stop_rings; |
|
unsigned long cfb_size; |
unsigned int cfb_fb; |
enum plane cfb_plane; |
375,6 → 488,8 |
unsigned int lvds_use_ssc:1; |
unsigned int display_clock_mode:1; |
int lvds_ssc_freq; |
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
unsigned int lvds_val; /* used for checking LVDS channel mode */ |
struct { |
int rate; |
int lanes; |
398,9 → 513,10 |
unsigned int fsb_freq, mem_freq, is_ddr3; |
|
spinlock_t error_lock; |
// struct drm_i915_error_state *first_error; |
/* Protected by dev->error_lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct error_work; |
// struct completion error_completion; |
struct completion error_completion; |
struct workqueue_struct *wq; |
|
/* Display functions */ |
573,7 → 689,13 |
struct drm_mm gtt_space; |
/** List of all objects in gtt_space. Used to restore gtt |
* mappings on resume */ |
struct list_head gtt_list; |
struct list_head bound_list; |
/** |
* List of objects which are not bound to the GTT (thus |
* are idle and not used by the GPU) but still have |
* (presumably uncached) pages still attached. |
*/ |
struct list_head unbound_list; |
|
/** Usable portion of the GTT for GEM */ |
unsigned long gtt_start; |
581,8 → 703,14 |
unsigned long gtt_end; |
|
// struct io_mapping *gtt_mapping; |
phys_addr_t gtt_base_addr; |
int gtt_mtrr; |
|
/** PPGTT used for aliasing the PPGTT with the GTT */ |
struct i915_hw_ppgtt *aliasing_ppgtt; |
|
u32 *l3_remap_info; |
|
// struct shrinker inactive_shrinker; |
|
/** |
597,17 → 725,6 |
struct list_head active_list; |
|
/** |
* List of objects which are not in the ringbuffer but which |
* still have a write_domain which needs to be flushed before |
* unbinding. |
* |
* last_rendering_seqno is 0 while an object is in this list. |
* |
* A reference is held on the buffer while on this list. |
*/ |
struct list_head flushing_list; |
|
/** |
* LRU list of objects which are not in the ringbuffer and |
* are ready to unbind, but are still in the GTT. |
* |
619,24 → 736,10 |
*/ |
struct list_head inactive_list; |
|
/** |
* LRU list of objects which are not in the ringbuffer but |
* are still pinned in the GTT. |
*/ |
struct list_head pinned_list; |
|
/** LRU list of objects with fence regs on them. */ |
struct list_head fence_list; |
|
/** |
* List of objects currently pending being freed. |
* |
* These objects are no longer in use, but due to a signal |
* we were prevented from freeing them at the appointed time. |
*/ |
struct list_head deferred_free_list; |
|
/** |
* We leave the user IRQ off as much as possible, |
* but this means that requests will finish and never |
* be retired once the system goes idle. Set a timer to |
684,6 → 787,22 |
size_t object_memory; |
u32 object_count; |
} mm; |
|
/* Old dri1 support infrastructure, beware the dragons ya fools entering |
* here! */ |
struct { |
unsigned allow_batchbuffer : 1; |
u32 __iomem *gfx_hws_cpu_addr; |
|
unsigned int cpp; |
int back_offset; |
int front_offset; |
int current_page; |
int page_flipping; |
} dri1; |
|
/* Kernel Modesetting */ |
|
struct sdvo_device_mapping sdvo_mappings[2]; |
/* indicate whether the LVDS_BORDER should be enabled or not */ |
unsigned int lvds_border_bits; |
693,16 → 812,14 |
struct drm_crtc *plane_to_crtc_mapping[3]; |
struct drm_crtc *pipe_to_crtc_mapping[3]; |
wait_queue_head_t pending_flip_queue; |
bool flip_pending_is_done; |
|
struct intel_pch_pll pch_plls[I915_NUM_PLLS]; |
|
/* Reclocking support */ |
bool render_reclock_avail; |
bool lvds_downclock_avail; |
/* indicates the reduced downclock for LVDS*/ |
int lvds_downclock; |
struct work_struct idle_work; |
struct timer_list idle_timer; |
bool busy; |
u16 orig_clock; |
int child_dev_num; |
struct child_device_config *child_dev; |
711,13 → 828,27 |
|
bool mchbar_need_disable; |
|
struct work_struct rps_work; |
spinlock_t rps_lock; |
/* gen6+ rps state */ |
struct { |
struct work_struct work; |
u32 pm_iir; |
/* lock - irqsave spinlock that protects the work_struct and
 * pm_iir. */
spinlock_t lock; |
|
/* The below variables and all the rps hw state are protected by
 * dev->struct_mutex. */
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
} rps; |
|
/* ilk-only ips/rps state. Everything in here is protected by the global |
* mchdev_lock in intel_pm.c */ |
struct { |
u8 cur_delay; |
u8 min_delay; |
u8 max_delay; |
u8 fmax; |
u8 fstart; |
|
727,15 → 858,16 |
u64 last_count2; |
struct timespec last_time2; |
unsigned long gfx_power; |
u8 corr; |
|
int c_m; |
int r_t; |
u8 corr; |
spinlock_t *mchdev_lock; |
} ips; |
|
enum no_fbc_reason no_fbc_reason; |
|
// struct drm_mm_node *compressed_fb; |
// struct drm_mm_node *compressed_llb; |
struct drm_mm_node *compressed_fb; |
struct drm_mm_node *compressed_llb; |
|
unsigned long last_gpu_reset; |
|
744,19 → 876,54 |
|
// struct backlight_device *backlight; |
|
// struct drm_property *broadcast_rgb_property; |
// struct drm_property *force_audio_property; |
struct drm_property *broadcast_rgb_property; |
struct drm_property *force_audio_property; |
|
bool hw_contexts_disabled; |
uint32_t hw_context_size; |
} drm_i915_private_t; |
|
/* Iterate over initialised rings: binds ring__ to each engine slot in
 * dev_priv__->ring[] in turn, skipping engines for which
 * intel_ring_initialized() returns false. */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
|
/* Values for the force-audio connector property (negative = off variants). */
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
HDMI_AUDIO_AUTO, /* trust EDID */
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
|
enum i915_cache_level { |
I915_CACHE_NONE, |
I915_CACHE_NONE = 0, |
I915_CACHE_LLC, |
I915_CACHE_LLC_MLC, /* gen6+ */ |
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ |
}; |
|
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
 * get_pages() is called once prior to the use of the associated set
 * of pages, before binding them into the GTT, and put_pages() is
 * called after we no longer need them. As we expect there to be
 * associated cost with migrating pages between the backing storage
 * and making them available for the GPU (e.g. clflush), we may hold
 * onto the pages after they are no longer referenced by the GPU
 * in case they may be used again shortly (for example migrating the
 * pages to a different memory domain within the GTT). put_pages()
 * will therefore most likely be called when the object itself is
 * being released or under memory pressure (where we attempt to
 * reap pages for the shrinker).
 */
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
};
|
struct drm_i915_gem_object { |
struct drm_gem_object base; |
|
const struct drm_i915_gem_object_ops *ops; |
|
void *mapped; |
|
/** Current space allocated to this object in the GTT, if any. */ |
763,18 → 930,16 |
struct drm_mm_node *gtt_space; |
struct list_head gtt_list; |
|
/** This object's place on the active/flushing/inactive lists */ |
/** This object's place on the active/inactive lists */ |
struct list_head ring_list; |
struct list_head mm_list; |
/** This object's place on GPU write list */ |
struct list_head gpu_write_list; |
/** This object's place in the batchbuffer or on the eviction list */ |
struct list_head exec_list; |
|
/** |
* This is set if the object is on the active or flushing lists |
* (has pending rendering), and is not set if it's on inactive (ready |
* to be unbound). |
* This is set if the object is on the active lists (has pending
* rendering and so a non-zero seqno), and is not set if it is on
* the inactive (ready to be unbound) list.
*/ |
unsigned int active:1; |
|
785,12 → 950,6 |
unsigned int dirty:1; |
|
/** |
* This is set if the object has been written to since the last |
* GPU flush. |
*/ |
unsigned int pending_gpu_write:1; |
|
/** |
* Fence register bits (if any) for this object. Will be set |
* as needed when mapped into the GTT. |
* Protected by dev->struct_mutex. |
806,7 → 965,14 |
* Current tiling mode for the object. |
*/ |
unsigned int tiling_mode:2; |
unsigned int tiling_changed:1; |
/** |
* Whether the tiling parameters for the currently associated fence |
* register have changed. Note that for the purposes of tracking |
* tiling changes we also treat the unfenced register, the register |
* slot that the object occupies whilst it executes a fenced |
* command (such as BLT on gen2/3), as a "fence". |
*/ |
unsigned int fence_dirty:1; |
|
/** How many users have pinned this object in GTT space. The following |
* users can each hold at most one reference: pwrite/pread, pin_ioctl |
842,14 → 1008,17 |
|
unsigned int cache_level:2; |
|
struct page **pages; |
unsigned int has_aliasing_ppgtt_mapping:1; |
unsigned int has_global_gtt_mapping:1; |
unsigned int has_dma_mapping:1; |
|
/** |
* DMAR support |
*/ |
struct scatterlist *sg_list; |
int num_sg; |
struct pagelist pages; |
int pages_pin_count; |
|
/* prime dma-buf support */ |
void *dma_buf_vmapping; |
int vmapping_count; |
|
/** |
* Used for performing relocations during execbuffer insertion. |
*/ |
864,13 → 1033,13 |
*/ |
uint32_t gtt_offset; |
|
/** Breadcrumb of last rendering to the buffer. */ |
uint32_t last_rendering_seqno; |
struct intel_ring_buffer *ring; |
|
/** Breadcrumb of last rendering to the buffer. */ |
uint32_t last_read_seqno; |
uint32_t last_write_seqno; |
/** Breadcrumb of last fenced GPU access to the buffer. */ |
uint32_t last_fenced_seqno; |
struct intel_ring_buffer *last_fenced_ring; |
|
/** Current tiling stride for the object, if it's tiled. */ |
uint32_t stride; |
878,13 → 1047,6 |
/** Record of address bit 17 of each page at last unbind. */ |
unsigned long *bit_17; |
|
|
/** |
* If present, while GEM_DOMAIN_CPU is in the read domain this array |
* flags which individual pages are valid. |
*/ |
uint8_t *page_cpu_valid; |
|
/** User space pin count and filp owning the pin */ |
uint32_t user_pin_count; |
struct drm_file *pin_filp; |
919,6 → 1081,9 |
/** GEM sequence number associated with this request. */ |
uint32_t seqno; |
|
/** Position in the ringbuffer of the end of the request */
u32 tail; |
|
/** Time at which this request was emitted, in jiffies. */ |
unsigned long emitted_jiffies; |
|
935,6 → 1100,7 |
spinlock_t lock; |
struct list_head request_list; |
} mm; |
struct idr context_idr; |
}; |
|
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) |
958,6 → 1124,8 |
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) |
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) |
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) |
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) |
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) |
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) |
|
/* |
975,8 → 1143,12 |
|
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) |
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) |
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
|
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) |
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) |
|
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) |
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) |
|
998,32 → 1170,62 |
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) |
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) |
|
#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) |
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
|
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) |
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) |
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) |
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) |
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
|
//#include "i915_trace.h" |
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) |
|
extern int i915_max_ioctl; |
extern unsigned int i915_fbpercrtc; |
extern int i915_panel_ignore_lid; |
extern unsigned int i915_powersave; |
extern unsigned int i915_semaphores; |
extern unsigned int i915_lvds_downclock; |
extern unsigned int i915_panel_use_ssc; |
extern int i915_vbt_sdvo_panel_type; |
extern unsigned int i915_enable_rc6; |
extern unsigned int i915_enable_fbc; |
extern bool i915_enable_hangcheck; |
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
|
extern int i915_resume(struct drm_device *dev); |
#define GT_FREQUENCY_MULTIPLIER 50 |
|
#include "i915_trace.h" |
|
/** |
* RC6 is a special power stage which allows the GPU to enter a very
* low-voltage mode when idle, using down to 0V while at this stage. This |
* stage is entered automatically when the GPU is idle when RC6 support is |
* enabled, and as soon as new workload arises GPU wakes up automatically as well. |
* |
* There are different RC6 modes available in Intel GPU, which differentiate |
* among each other with the latency required to enter and leave RC6 and |
* voltage consumed by the GPU in different states. |
* |
* The combination of the following flags define which states GPU is allowed |
* to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and |
* RC6pp is deepest RC6. Their support by hardware varies according to the |
* GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one |
* which brings the most power savings; deeper states save more power, but |
* require higher latency to switch to and wake up. |
*/ |
#define INTEL_RC6_ENABLE (1<<0) |
#define INTEL_RC6p_ENABLE (1<<1) |
#define INTEL_RC6pp_ENABLE (1<<2) |
|
extern unsigned int i915_fbpercrtc __always_unused; |
extern int i915_panel_ignore_lid __read_mostly; |
extern unsigned int i915_powersave __read_mostly; |
extern int i915_semaphores __read_mostly; |
extern unsigned int i915_lvds_downclock __read_mostly; |
extern int i915_lvds_channel_mode __read_mostly; |
extern int i915_panel_use_ssc __read_mostly; |
extern int i915_vbt_sdvo_panel_type __read_mostly; |
extern int i915_enable_rc6 __read_mostly; |
extern int i915_enable_fbc __read_mostly; |
extern bool i915_enable_hangcheck __read_mostly; |
extern int i915_enable_ppgtt __read_mostly; |
extern unsigned int i915_preliminary_hw_support __read_mostly; |
|
extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
|
/* i915_dma.c */ |
void i915_update_dri1_breadcrumb(struct drm_device *dev); |
extern void i915_kernel_lost_context(struct drm_device * dev); |
extern int i915_driver_load(struct drm_device *, unsigned long flags); |
extern int i915_driver_unload(struct drm_device *); |
1034,12 → 1236,15 |
extern void i915_driver_postclose(struct drm_device *dev, |
struct drm_file *file_priv); |
extern int i915_driver_device_is_agp(struct drm_device * dev); |
#ifdef CONFIG_COMPAT |
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, |
unsigned long arg); |
#endif |
extern int i915_emit_box(struct drm_device *dev, |
struct drm_clip_rect *box, |
int DR1, int DR4); |
extern int i915_reset(struct drm_device *dev, u8 flags); |
extern int intel_gpu_reset(struct drm_device *dev); |
extern int i915_reset(struct drm_device *dev); |
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); |
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); |
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); |
1049,19 → 1254,11 |
/* i915_irq.c */ |
void i915_hangcheck_elapsed(unsigned long data); |
void i915_handle_error(struct drm_device *dev, bool wedged); |
extern int i915_irq_emit(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_irq_wait(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
|
extern void intel_irq_init(struct drm_device *dev); |
extern void intel_gt_init(struct drm_device *dev); |
|
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_vblank_swap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_error_state_free(struct kref *error_ref); |
|
void |
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); |
1078,18 → 1275,6 |
#endif |
|
|
/* i915_mem.c */ |
extern int i915_mem_alloc(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_mem_free(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_mem_init_heap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern void i915_mem_takedown(struct mem_block **heap); |
extern void i915_mem_release(struct drm_device * dev, |
struct drm_file *file_priv, struct mem_block *heap); |
/* i915_gem.c */ |
int i915_gem_init_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
1117,6 → 1302,10 |
struct drm_file *file_priv); |
int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, |
1131,24 → 1320,45 |
struct drm_file *file_priv); |
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_load(struct drm_device *dev); |
int i915_gem_init_object(struct drm_gem_object *obj); |
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring, |
uint32_t invalidate_domains, |
uint32_t flush_domains); |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
const struct drm_i915_gem_object_ops *ops); |
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
size_t size); |
void i915_gem_free_object(struct drm_gem_object *obj); |
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
uint32_t alignment, |
bool map_and_fenceable); |
bool map_and_fenceable, |
bool nonblocking); |
void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
void i915_gem_lastclose(struct drm_device *dev); |
|
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); |
|
static inline dma_addr_t i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) |
{ |
return obj->pages.page[n]; |
}; |
|
/* Take a pin on @obj's backing pages so they cannot be released while in
 * use; pairs with i915_gem_object_unpin_pages(). Pages must already be
 * allocated (BUG otherwise). */
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages.page == NULL);
obj->pages_pin_count++;
}
/* Drop a pin taken by i915_gem_object_pin_pages(); BUGs on underflow. */
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
}
|
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); |
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); |
int i915_gem_object_sync(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *to); |
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *ring, |
u32 seqno); |
1169,18 → 1379,36 |
return (int32_t)(seq1 - seq2) >= 0; |
} |
|
static inline u32 |
i915_gem_next_request_seqno(struct intel_ring_buffer *ring) |
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); |
|
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
|
/* Pin the fence register currently backing @obj, if any.
 * Returns true (and bumps that register's pin_count) when @obj holds a
 * fence register; returns false when it has none. Pairs with
 * i915_gem_object_unpin_fence(). */
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
|
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *pipelined); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
/* Release a fence pin taken by i915_gem_object_pin_fence(); no-op when
 * @obj holds no fence register. */
static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
|
void i915_gem_retire_requests(struct drm_device *dev); |
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); |
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
bool interruptible); |
|
void i915_gem_reset(struct drm_device *dev); |
void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1187,18 → 1415,18 |
uint32_t read_domains, |
uint32_t write_domain); |
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); |
int __must_check i915_gem_init(struct drm_device *dev); |
int __must_check i915_gem_init_hw(struct drm_device *dev); |
void i915_gem_l3_remap(struct drm_device *dev); |
void i915_gem_init_swizzling(struct drm_device *dev); |
void i915_gem_init_ppgtt(struct drm_device *dev); |
void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
void i915_gem_do_init(struct drm_device *dev, |
unsigned long start, |
unsigned long mappable_end, |
unsigned long end); |
int __must_check i915_gpu_idle(struct drm_device *dev); |
int __must_check i915_gem_idle(struct drm_device *dev); |
/* Queue a new request on @ring. */
int i915_add_request(struct intel_ring_buffer *ring,
		     struct drm_file *file,
		     struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring, |
u32 *seqno); |
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, |
uint32_t seqno); |
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
int __must_check |
1205,6 → 1433,8 |
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
bool write); |
int __must_check |
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); |
int __must_check |
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, |
u32 alignment, |
struct intel_ring_buffer *pipelined); |
1225,21 → 1455,53 |
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level); |
|
|
struct dma_buf *i915_gem_prime_export(struct drm_device *dev, |
struct drm_gem_object *gem_obj, int flags); |
|
/* i915_gem_context.c */ |
void i915_gem_context_init(struct drm_device *dev); |
void i915_gem_context_fini(struct drm_device *dev); |
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); |
int i915_switch_context(struct intel_ring_buffer *ring, |
struct drm_file *file, int to_id); |
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
|
/* i915_gem_gtt.c */ |
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); |
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level); |
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj); |
|
void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, |
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); |
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level); |
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); |
void i915_gem_init_global_gtt(struct drm_device *dev, |
unsigned long start, |
unsigned long mappable_end, |
unsigned long end); |
|
/* i915_gem_evict.c */ |
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);
|
/* i915_gem_stolen.c */ |
int i915_gem_init_stolen(struct drm_device *dev); |
void i915_gem_cleanup_stolen(struct drm_device *dev); |
|
/* i915_gem_tiling.c */ |
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1270,9 → 1532,20 |
extern int i915_save_state(struct drm_device *dev); |
extern int i915_restore_state(struct drm_device *dev); |
|
/* i915_sysfs.c */ |
void i915_setup_sysfs(struct drm_device *dev_priv); |
void i915_teardown_sysfs(struct drm_device *dev_priv); |
|
/* intel_i2c.c */ |
extern int intel_setup_gmbus(struct drm_device *dev); |
extern void intel_teardown_gmbus(struct drm_device *dev); |
extern inline bool intel_gmbus_is_port_valid(unsigned port) |
{ |
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); |
} |
|
extern struct i2c_adapter *intel_gmbus_get_adapter( |
struct drm_i915_private *dev_priv, unsigned port); |
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); |
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); |
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) |
1307,23 → 1580,24 |
#endif /* CONFIG_ACPI */ |
|
/* modesetting */ |
extern void intel_modeset_init_hw(struct drm_device *dev); |
extern void intel_modeset_init(struct drm_device *dev); |
extern void intel_modeset_gem_init(struct drm_device *dev); |
extern void intel_modeset_cleanup(struct drm_device *dev); |
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
extern void ironlake_init_pch_refclk(struct drm_device *dev); |
extern void ironlake_enable_rc6(struct drm_device *dev); |
extern void gen6_set_rps(struct drm_device *dev, u8 val); |
extern void intel_detect_pch(struct drm_device *dev); |
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); |
extern int intel_enable_rc6(const struct drm_device *dev); |
|
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); |
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv); |
extern bool i915_semaphore_is_enabled(struct drm_device *dev); |
int i915_reg_read_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file); |
|
/* overlay */ |
#ifdef CONFIG_DEBUG_FS |
1336,28 → 1610,6 |
struct intel_display_error_state *error); |
#endif |
|
/* Legacy (pre-GEM/UMS) helpers: all of these operate on the render ring. */ |
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) |
 |
/* Reserve space for n dwords on the render ring; expects 'dev_priv' in scope. */ |
#define BEGIN_LP_RING(n) \ |
intel_ring_begin(LP_RING(dev_priv), (n)) |
 |
/* Emit one dword into the space reserved by BEGIN_LP_RING(). */ |
#define OUT_RING(x) \ |
intel_ring_emit(LP_RING(dev_priv), x) |
 |
/* Commit the dwords emitted since BEGIN_LP_RING() to the hardware. */ |
#define ADVANCE_LP_RING() \ |
intel_ring_advance(LP_RING(dev_priv)) |
 |
/** |
 * Lock test for when it's just for synchronization of ring access. |
 * |
 * In that case, we don't need to do it when GEM is initialized as nobody else |
 * has access to the ring. |
 */ |
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ |
if (LP_RING(dev->dev_private)->obj == NULL) \ |
LOCK_TEST_WITH_RETURN(dev, file); \ |
} while (0) |
|
/* On SNB platform, before reading ring registers forcewake bit |
* must be set to prevent GT core from power down and stale values being |
* returned. |
1364,14 → 1616,8 |
*/ |
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
/* NOTE(review): int-returning variant kept (the void one was a stale
 * conflicting redeclaration); presumably reports a FIFO-drain timeout —
 * confirm against the definition. */
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	(((dev_priv)->info->gen >= 6) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))
|
#define __i915_read(x, y) \ |
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); |
|
1426,6 → 1672,7 |
} |
|
|
/* NOTE(review): compat shim mapping ioread32() onto readl() — presumably
 * for a build environment without <linux/io.h>'s iomap helpers; confirm. */
#define ioread32(addr) readl(addr) |
|
|
|