30,6 → 30,8 |
#ifndef _I915_DRV_H_ |
#define _I915_DRV_H_ |
|
#include <uapi/drm/i915_drm.h> |
|
#include "i915_reg.h" |
#include "intel_bios.h" |
#include "intel_ringbuffer.h" |
96,7 → 98,12 |
}; |
#define port_name(p) ((p) + 'A') |
|
#define I915_GEM_GPU_DOMAINS \ |
(I915_GEM_DOMAIN_RENDER | \ |
I915_GEM_DOMAIN_SAMPLER | \ |
I915_GEM_DOMAIN_COMMAND | \ |
I915_GEM_DOMAIN_INSTRUCTION | \ |
I915_GEM_DOMAIN_VERTEX) |
|
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) |
|
114,6 → 121,19 |
}; |
#define I915_NUM_PLLS 2 |
|
/* Used by DP and FDI links */
struct intel_link_m_n { |
uint32_t tu; |
uint32_t gmch_m; |
uint32_t gmch_n; |
uint32_t link_m; |
uint32_t link_n; |
}; |
|
void intel_link_compute_m_n(int bpp, int nlanes, |
int pixel_clock, int link_clock, |
struct intel_link_m_n *m_n); |
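
/*
 * A minimal usage sketch for the helper above; the 24bpp, 4-lane and
 * kHz clock values are hypothetical, as is the function name.
 */
static inline void example_intel_link_m_n(void)
{
	struct intel_link_m_n m_n;

	intel_link_compute_m_n(24, 4,		/* bpp, lane count */
			       148500, 270000,	/* pixel clock, link clock */
			       &m_n);
}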
|
struct intel_ddi_plls { |
int spll_refcount; |
int wrpll1_refcount; |
143,8 → 163,13 |
#define I915_GEM_PHYS_OVERLAY_REGS 3 |
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) |
|
struct drm_i915_gem_phys_object { |
int id; |
struct page **page_list; |
drm_dma_handle_t *handle; |
struct drm_i915_gem_object *cur_obj; |
}; |
|
|
struct opregion_header; |
struct opregion_acpi; |
struct opregion_swsci; |
287,6 → 312,7 |
struct drm_i915_gem_object *obj); |
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
int x, int y); |
void (*hpd_irq_setup)(struct drm_device *dev); |
/* clock updates for mode set */ |
/* cursor updates */ |
/* render clock increase/decrease */ |
326,6 → 352,7 |
DEV_INFO_FLAG(has_llc) |
|
struct intel_device_info { |
u32 display_mmio_offset; |
u8 gen; |
u8 is_mobile:1; |
u8 is_i85x:1; |
353,6 → 380,50 |
u8 has_llc:1; |
}; |
|
enum i915_cache_level { |
I915_CACHE_NONE = 0, |
I915_CACHE_LLC, |
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ |
}; |
|
/* The Graphics Translation Table is the way in which GEN hardware translates a |
* Graphics Virtual Address into a Physical Address. In addition to the normal |
* collateral associated with any va->pa translations, GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent |
* and correct (in cases like swizzling). That region is referred to as GMADR in |
* the spec. |
*/ |
struct i915_gtt { |
unsigned long start; /* Start offset of used GTT */ |
size_t total; /* Total size GTT can map */ |
size_t stolen_size; /* Total size of stolen memory */ |
|
unsigned long mappable_end; /* End offset that we can CPU map */ |
struct io_mapping *mappable; /* Mapping to our CPU mappable region */ |
phys_addr_t mappable_base; /* PA of our GMADR */ |
|
/** "Graphics Stolen Memory" holds the global PTEs */ |
void __iomem *gsm; |
|
bool do_idle_maps; |
dma_addr_t scratch_page_dma; |
struct page *scratch_page; |
|
/* global gtt ops */ |
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, |
size_t *stolen, phys_addr_t *mappable_base, |
unsigned long *mappable_end); |
void (*gtt_remove)(struct drm_device *dev); |
void (*gtt_clear_range)(struct drm_device *dev, |
unsigned int first_entry, |
unsigned int num_entries); |
void (*gtt_insert_entries)(struct drm_device *dev, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level); |
}; |
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) |
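
/*
 * A sketch of how the ops above tie together at probe time; the wrapper
 * name and the clear-everything step are hypothetical, not driver policy.
 */
static inline int example_gtt_setup(struct drm_device *dev,
				    struct i915_gtt *gtt)
{
	int ret;

	ret = gtt->gtt_probe(dev, &gtt->total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	/* scrub all PTEs, i.e. entries [0, total >> PAGE_SHIFT) */
	gtt->gtt_clear_range(dev, 0, gtt_total_entries(*gtt));
	return 0;
}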
|
#define I915_PPGTT_PD_ENTRIES 512 |
#define I915_PPGTT_PT_ENTRIES 1024 |
struct i915_hw_ppgtt { |
362,6 → 433,16 |
uint32_t pd_offset; |
dma_addr_t *pt_dma_addr; |
dma_addr_t scratch_page_dma_addr; |
|
/* pte functions, mirroring the interface of the global gtt. */ |
void (*clear_range)(struct i915_hw_ppgtt *ppgtt, |
unsigned int first_entry, |
unsigned int num_entries); |
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level); |
void (*cleanup)(struct i915_hw_ppgtt *ppgtt); |
}; |
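
/*
 * The hooks above deliberately mirror the global gtt ops, so callers can
 * drive either translation path the same way. A sketch of an unbind
 * (offset and size hypothetical):
 *
 *	ppgtt->clear_range(ppgtt, obj_offset >> PAGE_SHIFT,
 *			   obj_size >> PAGE_SHIFT);
 */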
|
|
588,6 → 669,9 |
struct mutex hw_lock; |
}; |
|
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock; |
|
struct intel_ilk_power_mgmt { |
u8 cur_delay; |
u8 min_delay; |
628,6 → 712,158 |
struct work_struct error_work; |
}; |
|
struct i915_gem_mm { |
/** Memory allocator for GTT stolen memory */ |
struct drm_mm stolen; |
/** Memory allocator for GTT */ |
struct drm_mm gtt_space; |
/** List of all objects in gtt_space. Used to restore gtt |
* mappings on resume */ |
struct list_head bound_list; |
/** |
* List of objects which are not bound to the GTT (thus |
* are idle and not used by the GPU) but still have |
* (presumably uncached) pages still attached. |
*/ |
struct list_head unbound_list; |
|
/** Base address of "Graphics Stolen Memory" */
unsigned long stolen_base; /* limited to low memory (32-bit) */ |
|
int gtt_mtrr; |
|
/** PPGTT used for aliasing the PPGTT with the GTT */ |
struct i915_hw_ppgtt *aliasing_ppgtt; |
|
bool shrinker_no_lock_stealing; |
|
/** |
* List of objects currently involved in rendering. |
* |
* Includes buffers having the contents of their GPU caches |
* flushed, not necessarily primitives. last_rendering_seqno |
* represents when the rendering involved will be completed. |
* |
* A reference is held on the buffer while on this list. |
*/ |
struct list_head active_list; |
|
/** |
* LRU list of objects which are not in the ringbuffer and |
* are ready to unbind, but are still in the GTT. |
* |
* last_rendering_seqno is 0 while an object is in this list. |
* |
* A reference is not held on the buffer while on this list, |
* as merely being GTT-bound shouldn't prevent its being |
* freed, and we'll pull it off the list in the free path. |
*/ |
struct list_head inactive_list; |
|
/** LRU list of objects with fence regs on them. */ |
struct list_head fence_list; |
|
/** |
* We leave the user IRQ off as much as possible, |
* but this means that requests will finish and never |
* be retired once the system goes idle. Set a timer to |
* fire periodically while the ring is running. When it |
* fires, go retire requests. |
*/ |
struct delayed_work retire_work; |
|
/** |
* Are we in a non-interruptible section of code like |
* modesetting? |
*/ |
bool interruptible; |
|
/** |
* Flag if the X Server, and thus DRM, is not currently in |
* control of the device. |
* |
* This is set between LeaveVT and EnterVT. It needs to be |
* replaced with a semaphore. It also needs to be |
* transitioned away from for kernel modesetting. |
*/ |
int suspended; |
|
/** Bit 6 swizzling required for X tiling */ |
uint32_t bit_6_swizzle_x; |
/** Bit 6 swizzling required for Y tiling */ |
uint32_t bit_6_swizzle_y; |
|
/* storage for physical objects */ |
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
|
/* accounting, useful for userland debugging */ |
size_t object_memory; |
u32 object_count; |
}; |
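
/*
 * Informal sketch of how an object moves across the lists above:
 *
 *	create            -> unbound_list
 *	bind into the GTT -> bound_list (+ active_list while the GPU uses it)
 *	retire            -> inactive_list
 *	evict/unbind      -> back to unbound_list
 */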
|
struct i915_gpu_error { |
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
|
/* For reset and error_state handling. */ |
spinlock_t lock; |
/* Protected by the above dev->gpu_error.lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct work; |
|
unsigned long last_reset; |
|
/** |
* State variable and reset counter controlling the reset flow |
* |
* Upper bits are for the reset counter. This counter is used by the |
* wait_seqno code to notice, free from races, that a reset event happened and
* that it needs to restart the entire ioctl (since most likely the |
* seqno it waited for won't ever signal anytime soon). |
* |
* This is important for lock-free wait paths, where no contended lock |
* naturally enforces the correct ordering between the bail-out of the |
* waiter and the gpu reset work code. |
* |
* Lowest bit controls the reset state machine: Set means a reset is in |
* progress. This state will (presuming we don't have any bugs) decay |
* into either unset (successful reset) or the special WEDGED value (hw |
* terminally sour). All waiters on the reset_queue will be woken when |
* that happens. |
*/ |
atomic_t reset_counter; |
|
/** |
* Special values/flags for reset_counter |
* |
* Note that the code relies on |
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG |
* being true. |
*/ |
#define I915_RESET_IN_PROGRESS_FLAG 1 |
#define I915_WEDGED 0xffffffff |
|
/** |
* Waitqueue to signal when the reset has completed. Used by clients |
* that wait for a pending reset to settle.
*/ |
wait_queue_head_t reset_queue; |
|
/* For gpu hang simulation. */ |
unsigned int stop_rings; |
}; |
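
/*
 * A sketch of the lock-free wait dance the reset_counter comment above
 * describes (error handling elided):
 *
 *	unsigned reset = atomic_read(&error->reset_counter);
 *	... sleep waiting for the seqno ...
 *	if (reset != atomic_read(&error->reset_counter))
 *		return -EAGAIN;	(a reset fired, restart the ioctl)
 */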
|
enum modeset_restore { |
MODESET_ON_LID_OPEN, |
MODESET_DONE, |
MODESET_SUSPENDED, |
}; |
|
typedef struct drm_i915_private { |
struct drm_device *dev; |
|
644,10 → 880,11 |
/** forcewake_count is protected by gt_lock */ |
unsigned forcewake_count; |
/** gt_lock is also taken in irq contexts. */ |
spinlock_t gt_lock; |
|
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
|
|
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
657,9 → 894,11 |
*/ |
uint32_t gpio_mmio_base; |
|
wait_queue_head_t gmbus_wait_queue; |
|
struct pci_dev *bridge_dev; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t last_seqno, next_seqno; |
|
drm_dma_handle_t *status_page_dmah; |
struct resource mch_res; |
669,31 → 908,24 |
/* protects the irq masks */ |
spinlock_t irq_lock; |
|
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
// struct pm_qos_request pm_qos; |
|
/* DPIO indirect register protection */ |
struct mutex dpio_lock; |
|
/** Cached value of IMR to avoid reads in updating the bitfield */ |
u32 pipestat[2]; |
u32 irq_mask; |
u32 gt_irq_mask; |
u32 pch_irq_mask; |
|
u32 hotplug_supported_mask; |
struct work_struct hotplug_work; |
bool enable_hotplug_processing; |
|
int num_pipe; |
int num_pch_pll; |
|
unsigned long cfb_size; |
unsigned int cfb_fb; |
enum plane cfb_plane; |
704,7 → 936,7 |
|
/* overlay */ |
struct intel_overlay *overlay; |
unsigned int sprite_scaling_enabled; |
|
/* LVDS info */ |
int backlight_level; /* restore backlight to this value */ |
721,7 → 953,6 |
unsigned int display_clock_mode:1; |
int lvds_ssc_freq; |
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
struct { |
int rate; |
int lanes; |
742,11 → 973,6 |
|
unsigned int fsb_freq, mem_freq, is_ddr3; |
|
struct workqueue_struct *wq; |
|
/* Display functions */ |
758,116 → 984,13 |
|
unsigned long quirks; |
|
enum modeset_restore modeset_restore; |
struct mutex modeset_restore_lock; |
|
struct i915_gtt gtt; |
|
struct i915_gem_mm mm; |
|
/* Kernel Modesetting */ |
|
struct sdvo_device_mapping sdvo_mappings[2]; |
908,7 → 1031,7 |
struct drm_mm_node *compressed_fb; |
struct drm_mm_node *compressed_llb; |
|
struct i915_gpu_error gpu_error; |
|
/* list of fbdev register on this device */ |
struct intel_fbdev *fbdev; |
927,7 → 1050,7 |
bool hw_contexts_disabled; |
uint32_t hw_context_size; |
|
u32 fdi_rx_config; |
|
struct i915_suspend_saved_registers regfile; |
|
948,11 → 1071,7 |
HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
}; |
|
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) |
|
struct drm_i915_gem_object_ops { |
/* Interface between the GEM object and its backing storage. |
977,10 → 1096,10 |
|
const struct drm_i915_gem_object_ops *ops; |
|
/** Current space allocated to this object in the GTT, if any. */ |
struct drm_mm_node *gtt_space; |
/** Stolen memory for this object, instead of being backed by shmem. */ |
struct drm_mm_node *stolen; |
struct list_head gtt_list; |
|
/** This object's place on the active/inactive lists */ |
1065,7 → 1184,6 |
unsigned int has_global_gtt_mapping:1; |
unsigned int has_dma_mapping:1; |
|
struct sg_table *pages; |
int pages_pin_count; |
|
1107,13 → 1225,6 |
|
/** for physically allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
}; |
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) |
|
1152,7 → 1263,7 |
|
struct drm_i915_file_private { |
struct { |
spinlock_t lock; |
struct list_head request_list; |
} mm; |
struct idr context_idr; |
1238,6 → 1349,8 |
|
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
|
#define HAS_DDI(dev) (IS_HASWELL(dev)) |
|
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
1293,6 → 1406,7 |
extern bool i915_enable_hangcheck __read_mostly; |
extern int i915_enable_ppgtt __read_mostly; |
extern unsigned int i915_preliminary_hw_support __read_mostly; |
extern int i915_disable_power_well __read_mostly; |
|
extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
1329,6 → 1443,7 |
void i915_handle_error(struct drm_device *dev, bool wedged); |
|
extern void intel_irq_init(struct drm_device *dev); |
extern void intel_hpd_init(struct drm_device *dev); |
extern void intel_gt_init(struct drm_device *dev); |
extern void intel_gt_reset(struct drm_device *dev); |
|
1397,6 → 1512,8 |
int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_load(struct drm_device *dev); |
void *i915_gem_object_alloc(struct drm_device *dev); |
void i915_gem_object_free(struct drm_i915_gem_object *obj); |
int i915_gem_init_object(struct drm_gem_object *obj); |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
const struct drm_i915_gem_object_ops *ops); |
1403,6 → 1520,7 |
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
size_t size); |
void i915_gem_free_object(struct drm_gem_object *obj); |
|
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
uint32_t alignment, |
bool map_and_fenceable, |
1409,6 → 1527,7 |
bool nonblocking); |
void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
void i915_gem_lastclose(struct drm_device *dev); |
|
1460,8 → 1579,8 |
return (int32_t)(seq1 - seq2) >= 0; |
} |
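
/*
 * Worked wraparound example: for seq1 == 1 and seq2 == 0xffffffff,
 * (int32_t)(seq1 - seq2) == 2 >= 0, so seq1 correctly counts as having
 * passed seq2 even though the u32 seqno space has wrapped.
 */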
|
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
|
1487,9 → 1606,19 |
|
void i915_gem_retire_requests(struct drm_device *dev); |
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); |
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
bool interruptible); |
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
{ |
return unlikely(atomic_read(&error->reset_counter) |
& I915_RESET_IN_PROGRESS_FLAG); |
} |
|
static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
{ |
return atomic_read(&error->reset_counter) == I915_WEDGED; |
} |
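
/*
 * A sketch of how a wait path might combine these helpers via
 * i915_gem_check_wedge() above (the exact return-value policy lives in
 * i915_gem.c):
 *
 *	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 *	if (ret)
 *		return ret;	(-EIO when wedged, -EAGAIN mid-reset)
 */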
|
void i915_gem_reset(struct drm_device *dev); |
void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1529,9 → 1658,10 |
void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
|
uint32_t |
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); |
uint32_t |
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
int tiling_mode, bool fenced); |
|
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level); |
1552,7 → 1682,6 |
struct drm_file *file); |
|
/* i915_gem_gtt.c */ |
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj, |
1566,12 → 1695,10 |
enum i915_cache_level cache_level); |
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); |
void i915_gem_init_global_gtt(struct drm_device *dev); |
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, |
unsigned long mappable_end, unsigned long end); |
int i915_gem_gtt_init(struct drm_device *dev); |
static inline void i915_gem_chipset_flush(struct drm_device *dev) |
{ |
if (INTEL_INFO(dev)->gen < 6) |
1589,9 → 1716,22 |
|
/* i915_gem_stolen.c */ |
int i915_gem_init_stolen(struct drm_device *dev); |
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); |
void i915_gem_stolen_cleanup_compression(struct drm_device *dev); |
void i915_gem_cleanup_stolen(struct drm_device *dev); |
struct drm_i915_gem_object * |
i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); |
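
/*
 * A sketch of carving a buffer out of stolen memory with a fallback to
 * a regular shmem-backed object (error handling elided):
 *
 *	obj = i915_gem_object_create_stolen(dev, size);
 *	if (obj == NULL)
 *		obj = i915_gem_alloc_object(dev, size);
 */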
|
/* i915_gem_tiling.c */ |
static inline bool
i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{ |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
|
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
obj->tiling_mode != I915_TILING_NONE; |
} |
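
/*
 * A sketch of the intended use: pread/pwrite style paths test the helper
 * above and then apply the fixups declared below:
 *
 *	if (i915_gem_object_needs_bit17_swizzle(obj))
 *		i915_gem_object_do_bit_17_swizzle(obj);
 */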
|
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1617,9 → 1757,9 |
extern int i915_save_state(struct drm_device *dev); |
extern int i915_restore_state(struct drm_device *dev); |
|
/* i915_ums.c */ |
void i915_save_display_reg(struct drm_device *dev); |
void i915_restore_display_reg(struct drm_device *dev); |
|
/* i915_sysfs.c */ |
void i915_setup_sysfs(struct drm_device *dev_priv); |
1676,6 → 1816,7 |
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
bool force_restore); |
extern void i915_redisable_vga(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1748,6 → 1889,21 |
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
|
/* "Broadcast RGB" property */ |
#define INTEL_BROADCAST_RGB_AUTO 0 |
#define INTEL_BROADCAST_RGB_FULL 1 |
#define INTEL_BROADCAST_RGB_LIMITED 2 |
|
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) |
{ |
if (HAS_PCH_SPLIT(dev)) |
return CPU_VGACNTRL; |
else if (IS_VALLEYVIEW(dev)) |
return VLV_VGACNTRL; |
else |
return VGACNTRL; |
} |
|
typedef struct |
{ |
int width; |