i915_drv.h: changes between Rev 3243 and Rev 3480

--- i915_drv.h	(Rev 3243)
+++ i915_drv.h	(Rev 3480)
@@ -28,8 +28,10 @@
  */

 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_

+#include <uapi/drm/i915_drm.h>
+
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -94,8 +96,13 @@
 	PORT_E,
 	I915_MAX_PORTS
 };
 #define port_name(p) ((p) + 'A')

-#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define I915_GEM_GPU_DOMAINS \
+	(I915_GEM_DOMAIN_RENDER | \
+	 I915_GEM_DOMAIN_SAMPLER | \
+	 I915_GEM_DOMAIN_COMMAND | \
+	 I915_GEM_DOMAIN_INSTRUCTION | \
+	 I915_GEM_DOMAIN_VERTEX)

 #define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
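Note that this is not a pure refactor: the old mask was "everything except CPU and GTT", which also covered bits no domain actually defines, while the new one enumerates exactly the five GPU domains. A standalone check of the difference — illustrative only; the domain bit values are the ones published in i915_drm.h, everything else here is scaffolding:

	#include <stdio.h>

	#define I915_GEM_DOMAIN_CPU		0x00000001
	#define I915_GEM_DOMAIN_RENDER		0x00000002
	#define I915_GEM_DOMAIN_SAMPLER		0x00000004
	#define I915_GEM_DOMAIN_COMMAND		0x00000008
	#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
	#define I915_GEM_DOMAIN_VERTEX		0x00000020
	#define I915_GEM_DOMAIN_GTT		0x00000040

	int main(void)
	{
		unsigned int old_mask = ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT);
		unsigned int new_mask = I915_GEM_DOMAIN_RENDER |
					I915_GEM_DOMAIN_SAMPLER |
					I915_GEM_DOMAIN_COMMAND |
					I915_GEM_DOMAIN_INSTRUCTION |
					I915_GEM_DOMAIN_VERTEX;

		/* prints old: 0xffffffbe  new: 0x0000003e */
		printf("old: 0x%08x  new: 0x%08x\n", old_mask, new_mask);
		return 0;
	}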
@@ -112,9 +119,22 @@
 	int fp0_reg;
 	int fp1_reg;
 };
 #define I915_NUM_PLLS 2
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	uint32_t tu;
+	uint32_t gmch_m;
+	uint32_t gmch_n;
+	uint32_t link_m;
+	uint32_t link_n;
+};
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n);

 struct intel_ddi_plls {
 	int spll_refcount;
 	int wrpll1_refcount;
 	int wrpll2_refcount;
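For orientation, the M/N pairs express clock ratios the display hardware needs for DP and FDI links. A hedged sketch of the arithmetic behind the new helper — the real intel_link_compute_m_n() lives in intel_display.c and additionally reduces each ratio to fit the registers' width, so treat this as illustrative, not the driver's exact code:

	/* data (gmch) M/N = (bpp * pixel_clock) / (nlanes * link_clock * 8)
	 * link M/N        =  pixel_clock / link_clock
	 */
	static void sketch_compute_m_n(int bpp, int nlanes,
				       int pixel_clock, int link_clock,
				       struct intel_link_m_n *m_n)
	{
		m_n->tu = 64;			/* common DP transfer-unit size */
		m_n->gmch_m = bpp * pixel_clock;	/* unreduced data ratio */
		m_n->gmch_n = link_clock * nlanes * 8;
		m_n->link_m = pixel_clock;		/* unreduced link ratio */
		m_n->link_n = link_clock;
	}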
@@ -141,7 +161,14 @@
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_1 2
 #define I915_GEM_PHYS_OVERLAY_REGS 3
 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+	int id;
+	struct page **page_list;
+	drm_dma_handle_t *handle;
+	struct drm_i915_gem_object *cur_obj;
+};

 struct opregion_header;
 struct opregion_acpi;
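Phys objects back hardware (pre-965 cursors, overlay registers) that needs physically contiguous, DMA-able memory rather than ordinary shmem pages. A hedged sketch of how such an object is typically populated — the allocator behind this sits in i915_gem.c; this fragment only illustrates the pattern, and `dev`/`size` are assumed to be in scope:

	struct drm_i915_gem_phys_object *phys;

	phys = kzalloc(sizeof(*phys), GFP_KERNEL);
	if (phys) {
		phys->id = I915_GEM_PHYS_CURSOR_0;
		/* physically contiguous, DMA-able backing memory */
		phys->handle = drm_pci_alloc(dev, size, PAGE_SIZE);
		if (!phys->handle) {
			kfree(phys);
			phys = NULL;
		}
	}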
@@ -285,10 +310,11 @@
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
 	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			    int x, int y);
+	void (*hpd_irq_setup)(struct drm_device *dev);
 	/* clock updates for mode set */
 	/* cursor updates */
 	/* render clock increase/decrease */
 	/* display clock increase/decrease */
 	/* pll clock increase/decrease */
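The new hook is dispatched like the other per-platform display_funcs members; a call site would look roughly like this (illustrative fragment, `dev_priv` assumed in scope):

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);	/* platform-specific HPD IRQ wiring */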
@@ -324,9 +350,10 @@
 	DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
 	DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
 	DEV_INFO_FLAG(has_llc)

 struct intel_device_info {
+	u32 display_mmio_offset;
 	u8 gen;
 	u8 is_mobile:1;
 	u8 is_i85x:1;
 	u8 is_i915g:1;
@@ -351,16 +378,70 @@
 	u8 has_bsd_ring:1;
 	u8 has_blt_ring:1;
 	u8 has_llc:1;
 };
+
+enum i915_cache_level {
+	I915_CACHE_NONE = 0,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
+/* The Graphics Translation Table is the way in which GEN hardware translates a
+ * Graphics Virtual Address into a Physical Address. In addition to the normal
+ * collateral associated with any va->pa translations GEN hardware also has a
+ * portion of the GTT which can be mapped by the CPU and remain both coherent
+ * and correct (in cases like swizzling). That region is referred to as GMADR in
+ * the spec.
+ */
+struct i915_gtt {
+	unsigned long start;		/* Start offset of used GTT */
+	size_t total;			/* Total size GTT can map */
+	size_t stolen_size;		/* Total size of stolen memory */
+
+	unsigned long mappable_end;	/* End offset that we can CPU map */
+	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
+	phys_addr_t mappable_base;	/* PA of our GMADR */
+
+	/** "Graphics Stolen Memory" holds the global PTEs */
+	void __iomem *gsm;
+
+	bool do_idle_maps;
+	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
+
+	/* global gtt ops */
+	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+			 size_t *stolen, phys_addr_t *mappable_base,
+			 unsigned long *mappable_end);
+	void (*gtt_remove)(struct drm_device *dev);
+	void (*gtt_clear_range)(struct drm_device *dev,
+				unsigned int first_entry,
+				unsigned int num_entries);
+	void (*gtt_insert_entries)(struct drm_device *dev,
+				   struct sg_table *st,
+				   unsigned int pg_start,
+				   enum i915_cache_level cache_level);
+};
+#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)

 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
 	struct drm_device *dev;
 	unsigned num_pd_entries;
 	struct page **pt_pages;
 	uint32_t pd_offset;
 	dma_addr_t *pt_dma_addr;
 	dma_addr_t scratch_page_dma_addr;
+
+	/* pte functions, mirroring the interface of the global gtt. */
+	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
+			    unsigned int first_entry,
+			    unsigned int num_entries);
+	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
+			       struct sg_table *st,
+			       unsigned int pg_start,
+			       enum i915_cache_level cache_level);
+	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
 };

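For orientation: gtt_total_entries() converts the mapped size in bytes into a PTE count by shifting by PAGE_SHIFT, and both translation tables are now driven through function pointers, so a caller clears or fills a PTE range the same way regardless of the generation-specific implementation. A hedged sketch, assuming the dev_priv->gtt member that appears later in this diff; the unbinding math is an assumption, not code from the patch:

	static void sketch_clear_object_range(struct drm_i915_private *dev_priv,
					      unsigned long gtt_offset, size_t size)
	{
		unsigned int first = gtt_offset >> PAGE_SHIFT;	/* byte offset -> PTE index */
		unsigned int count = size >> PAGE_SHIFT;	/* byte size   -> PTE count */

		/* dispatch through the vtable; works for any gen's backend */
		dev_priv->gtt.gtt_clear_range(dev_priv->dev, first, count);
	}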
@@ -586,9 +667,12 @@
 	 * Must be taken after struct_mutex if nested.
 	 */
 	struct mutex hw_lock;
 };
+
+/* defined intel_pm.c */
+extern spinlock_t mchdev_lock;

 struct intel_ilk_power_mgmt {
 	u8 cur_delay;
 	u8 min_delay;
 	u8 max_delay;
@@ -626,7 +710,159 @@
 struct intel_l3_parity {
 	u32 *remap_info;
 	struct work_struct error_work;
 };
+
+struct i915_gem_mm {
+	/** Memory allocator for GTT stolen memory */
+	struct drm_mm stolen;
+	/** Memory allocator for GTT */
+	struct drm_mm gtt_space;
+	/** List of all objects in gtt_space. Used to restore gtt
+	 *  mappings on resume */
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
+
+	/** Usable portion of the GTT for GEM */
+	unsigned long stolen_base; /* limited to low memory (32-bit) */
+
+	int gtt_mtrr;
+
+	/** PPGTT used for aliasing the PPGTT with the GTT */
+	struct i915_hw_ppgtt *aliasing_ppgtt;
+
+	bool shrinker_no_lock_stealing;
+
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
+	/** LRU list of objects with fence regs on them. */
+	struct list_head fence_list;
+
+	/**
+	 * We leave the user IRQ off as much as possible,
+	 * but this means that requests will finish and never
+	 * be retired once the system goes idle. Set a timer to
+	 * fire periodically while the ring is running. When it
+	 * fires, go retire requests.
+	 */
+	struct delayed_work retire_work;
+
+	/**
+	 * Are we in a non-interruptible section of code like
+	 * modesetting?
+	 */
+	bool interruptible;
+
+	/**
+	 * Flag if the X Server, and thus DRM, is not currently in
+	 * control of the device.
+	 *
+	 * This is set between LeaveVT and EnterVT. It needs to be
+	 * replaced with a semaphore. It also needs to be
+	 * transitioned away from for kernel modesetting.
+	 */
+	int suspended;
+
+	/** Bit 6 swizzling required for X tiling */
+	uint32_t bit_6_swizzle_x;
+	/** Bit 6 swizzling required for Y tiling */
+	uint32_t bit_6_swizzle_y;
+
+	/* storage for physical objects */
+	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+	/* accounting, useful for userland debugging */
+	size_t object_memory;
+	u32 object_count;
+};
+
+struct i915_gpu_error {
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	/* For reset and error_state handling. */
+	spinlock_t lock;
+	/* Protected by the above dev->gpu_error.lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct work;
+
+	unsigned long last_reset;
+
+	/**
+	 * State variable and reset counter controlling the reset flow
+	 *
+	 * Upper bits are for the reset counter. This counter is used by the
+	 * wait_seqno code to notice, race-free, that a reset event happened and
+	 * that it needs to restart the entire ioctl (since most likely the
+	 * seqno it waited for won't ever signal anytime soon).
+	 *
+	 * This is important for lock-free wait paths, where no contended lock
+	 * naturally enforces the correct ordering between the bail-out of the
+	 * waiter and the gpu reset work code.
+	 *
+	 * Lowest bit controls the reset state machine: Set means a reset is in
+	 * progress. This state will (presuming we don't have any bugs) decay
+	 * into either unset (successful reset) or the special WEDGED value (hw
+	 * terminally sour). All waiters on the reset_queue will be woken when
+	 * that happens.
+	 */
+	atomic_t reset_counter;
+
+	/**
+	 * Special values/flags for reset_counter
+	 *
+	 * Note that the code relies on
+	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
+	 * being true.
+	 */
+#define I915_RESET_IN_PROGRESS_FLAG	1
+#define I915_WEDGED			0xffffffff
+
+	/**
+	 * Waitqueue to signal when the reset has completed. Used by clients
+	 * that wait for dev_priv->mm.wedged to settle.
+	 */
+	wait_queue_head_t reset_queue;
+
+	/* For gpu hang simulation. */
+	unsigned int stop_rings;
+};
+
+enum modeset_restore {
+	MODESET_ON_LID_OPEN,
+	MODESET_DONE,
+	MODESET_SUSPENDED,
+};

 typedef struct drm_i915_private {
 	struct drm_device *dev;
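The reset_counter encoding above enables lock-free waiters: sample the counter before sleeping and treat any change, or a set in-progress bit, as "a reset happened, restart the ioctl". A hedged illustration of that pattern — the function name is invented and this is not code from the patch:

	static int sketch_check_reset(struct i915_gpu_error *error,
				      unsigned int pre_wait_counter)
	{
		unsigned int now = atomic_read(&error->reset_counter);

		if (now & I915_RESET_IN_PROGRESS_FLAG)	/* reset currently running */
			return -EAGAIN;
		if (now != pre_wait_counter)		/* a reset completed meanwhile */
			return -EAGAIN;
		return 0;				/* safe to trust the seqno wait */
	}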
@@ -642,56 +878,52 @@
 	 * with dev->struct_mutex. */
 	unsigned gt_fifo_count;
 	/** forcewake_count is protected by gt_lock */
 	unsigned forcewake_count;
 	/** gt_lock is also taken in irq contexts. */
-	struct spinlock gt_lock;
+	spinlock_t gt_lock;

 	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

 	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
 	 * controller on different i2c buses. */
 	struct mutex gmbus_mutex;

 	/**
 	 * Base address of the gmbus and gpio block.
 	 */
 	uint32_t gpio_mmio_base;
+
+	wait_queue_head_t gmbus_wait_queue;

 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer ring[I915_NUM_RINGS];
-	uint32_t next_seqno;
+	uint32_t last_seqno, next_seqno;

 	drm_dma_handle_t *status_page_dmah;
 	struct resource mch_res;

 	atomic_t irq_received;

 	/* protects the irq masks */
 	spinlock_t irq_lock;

+	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+//	struct pm_qos_request pm_qos;
+
 	/* DPIO indirect register protection */
-	spinlock_t dpio_lock;
+	struct mutex dpio_lock;

 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 pipestat[2];
 	u32 irq_mask;
 	u32 gt_irq_mask;
-	u32 pch_irq_mask;

 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
+	bool enable_hotplug_processing;

 	int num_pipe;
 	int num_pch_pll;

-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;

 	unsigned long cfb_size;
@@ -719,11 +951,10 @@
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
 	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
 	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	unsigned int lvds_val; /* used for checking LVDS channel mode */
 	struct {
 		int rate;
 		int lanes;
 		int preemphasis;
 		int vswing;
@@ -740,11 +971,6 @@
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

 	unsigned int fsb_freq, mem_freq, is_ddr3;

-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
 	struct workqueue_struct *wq;
@@ -756,113 +982,10 @@
 	enum intel_pch pch_type;
 	unsigned short pch_id;

 	unsigned long quirks;
-
-	/* Register state */
-	bool modeset_on_lid;
-
-	struct {
-		/** Bridge to intel-gtt-ko */
-		struct intel_gtt *gtt;
-		/** Memory allocator for GTT stolen memory */
-		struct drm_mm stolen;
-		/** Memory allocator for GTT */
-		struct drm_mm gtt_space;
-		/** List of all objects in gtt_space. Used to restore gtt
-		 *  mappings on resume */
-		struct list_head bound_list;
-		/**
-		 * List of objects which are not bound to the GTT (thus
-		 * are idle and not used by the GPU) but still have
-		 * (presumably uncached) pages still attached.
-		 */
-		struct list_head unbound_list;
-
-		/** Usable portion of the GTT for GEM */
-		unsigned long gtt_start;
-		unsigned long gtt_mappable_end;
-		unsigned long gtt_end;
-
-//		struct io_mapping *gtt_mapping;
-		phys_addr_t gtt_base_addr;
-		int gtt_mtrr;
-
-		/** PPGTT used for aliasing the PPGTT with the GTT */
-		struct i915_hw_ppgtt *aliasing_ppgtt;
-
-//		struct shrinker inactive_shrinker;
-		bool shrinker_no_lock_stealing;
-
-		/**
-		 * List of objects currently involved in rendering.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives. last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
-		struct list_head active_list;
-
-		/**
-		 * LRU list of objects which are not in the ringbuffer and
-		 * are ready to unbind, but are still in the GTT.
-		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
-		 *
-		 * A reference is not held on the buffer while on this list,
-		 * as merely being GTT-bound shouldn't prevent its being
-		 * freed, and we'll pull it off the list in the free path.
-		 */
-		struct list_head inactive_list;
-
-		/** LRU list of objects with fence regs on them. */
-		struct list_head fence_list;
-
-		/**
-		 * We leave the user IRQ off as much as possible,
-		 * but this means that requests will finish and never
-		 * be retired once the system goes idle. Set a timer to
-		 * fire periodically while the ring is running. When it
-		 * fires, go retire requests.
-		 */
-		struct delayed_work retire_work;
-
-		/**
-		 * Are we in a non-interruptible section of code like
-		 * modesetting?
-		 */
-		bool interruptible;
-
-		/**
-		 * Flag if the X Server, and thus DRM, is not currently in
-		 * control of the device.
-		 *
-		 * This is set between LeaveVT and EnterVT. It needs to be
-		 * replaced with a semaphore. It also needs to be
-		 * transitioned away from for kernel modesetting.
-		 */
-		int suspended;
-
-		/**
-		 * Flag if the hardware appears to be wedged.
-		 *
-		 * This is set when attempts to idle the device timeout.
-		 * It prevents command submission from occurring and makes
-		 * every pending request fail
-		 */
-		atomic_t wedged;
-
-		/** Bit 6 swizzling required for X tiling */
-		uint32_t bit_6_swizzle_x;
-		/** Bit 6 swizzling required for Y tiling */
-		uint32_t bit_6_swizzle_y;
-
-		/* storage for physical objects */
-//		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
-
-		/* accounting, useful for userland debugging */
-		size_t gtt_total;
-		size_t mappable_gtt_total;
-		size_t object_memory;
-		u32 object_count;
+
+	enum modeset_restore modeset_restore;
+	struct mutex modeset_restore_lock;
+
+	struct i915_gtt gtt;
+
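The inline `struct { ... } mm;` block of drm_i915_private is dissolved here: its fields move to the standalone struct i915_gem_mm and struct i915_gtt defined earlier in this diff. Call sites change along these lines (illustrative before/after, not from the patch):

	/* before: */ size_t total = dev_priv->mm.gtt_total;
	/* after:  */ size_t total = dev_priv->gtt.total;

	/* and PTE counts now come from the helper added above: */
	unsigned int entries = gtt_total_entries(dev_priv->gtt);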
@@ -906,7 +1029,7 @@
 	enum no_fbc_reason no_fbc_reason;

 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;

-	unsigned long last_gpu_reset;
+	struct i915_gpu_error gpu_error;

@@ -925,7 +1048,7 @@
 	struct drm_property *force_audio_property;

 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;

-	bool fdi_rx_polarity_reversed;
+	u32 fdi_rx_config;

@@ -946,13 +1069,9 @@
 	HDMI_AUDIO_OFF, /* force turn off HDMI audio */
 	HDMI_AUDIO_AUTO, /* trust EDID */
 	HDMI_AUDIO_ON, /* force turn on HDMI audio */
 };

-enum i915_cache_level {
-	I915_CACHE_NONE = 0,
-	I915_CACHE_LLC,
-	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
+#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

 struct drm_i915_gem_object_ops {
 	/* Interface between the GEM object and its backing storage.
975 | struct drm_i915_gem_object { |
1094 | struct drm_i915_gem_object { |
976 | struct drm_gem_object base; |
1095 | struct drm_gem_object base; |
Line 977... | Line 1096... | ||
977 | 1096 | ||
Line 978... | Line -... | ||
978 | const struct drm_i915_gem_object_ops *ops; |
- | |
979 | - | ||
980 | // void *mapped; |
1097 | const struct drm_i915_gem_object_ops *ops; |
981 | 1098 | ||
- | 1099 | /** Current space allocated to this object in the GTT, if any. */ |
|
- | 1100 | struct drm_mm_node *gtt_space; |
|
982 | /** Current space allocated to this object in the GTT, if any. */ |
1101 | /** Stolen memory for this object, instead of being backed by shmem. */ |
Line 983... | Line 1102... | ||
983 | struct drm_mm_node *gtt_space; |
1102 | struct drm_mm_node *stolen; |
984 | struct list_head gtt_list; |
1103 | struct list_head gtt_list; |
985 | 1104 | ||
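An object backed by stolen memory carries a non-NULL obj->stolen node instead of shmem pages, and I915_GTT_RESERVED is a deliberately invalid sentinel pointer usable to mark GTT ranges that are pre-claimed rather than owned by a real drm_mm node. A hedged sketch of the checks this enables — both helper names are invented for illustration:

	static bool sketch_is_stolen(struct drm_i915_gem_object *obj)
	{
		return obj->stolen != NULL;	/* stolen-memory backing, not shmem */
	}

	static bool sketch_is_prereserved(struct drm_i915_gem_object *obj)
	{
		return obj->gtt_space == I915_GTT_RESERVED;	/* sentinel, not a node */
	}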
@@ -1063,8 +1182,7 @@

 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
 	unsigned int has_dma_mapping:1;

-//	dma_addr_t *allocated_pages;
 	struct sg_table *pages;
 	int pages_pin_count;
@@ -1105,14 +1223,7 @@
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;

 	/** for phy allocated objects */
 	struct drm_i915_gem_phys_object *phys_obj;
-
-	/**
-	 * Number of crtcs where this object is currently the fb, but
-	 * will be page flipped away on the next vblank. When it
-	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
-	 */
-	atomic_t pending_flip;
 };
 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1229 | #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) |
Line 1150... | Line 1261... | ||
1150 | struct list_head client_list; |
1261 | struct list_head client_list; |
1151 | }; |
1262 | }; |
Line 1152... | Line 1263... | ||
1152 | 1263 | ||
1153 | struct drm_i915_file_private { |
1264 | struct drm_i915_file_private { |
1154 | struct { |
1265 | struct { |
1155 | struct spinlock lock; |
1266 | spinlock_t lock; |
1156 | struct list_head request_list; |
1267 | struct list_head request_list; |
1157 | } mm; |
1268 | } mm; |
1158 | struct idr context_idr; |
1269 | struct idr context_idr; |
@@ -1236,8 +1347,10 @@
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

+#define HAS_DDI(dev) (IS_HASWELL(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -1291,8 +1404,9 @@
 extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
+extern int i915_disable_power_well __read_mostly;

 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -1327,7 +1441,8 @@
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);

 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
 extern void intel_gt_reset(struct drm_device *dev);
@@ -1395,21 +1510,25 @@
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_device *dev);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
 int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
+
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
 				     bool map_and_fenceable,
 				     bool nonblocking);
 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);

 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
@@ -1458,10 +1577,10 @@
 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 {
 	return (int32_t)(seq1 - seq2) >= 0;
 }

-extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
-
+int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
+int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

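i915_seqno_passed() is unchanged, but its signed-difference trick matters more now that the new set_seqno entry point lets seqnos start anywhere: with seq2 = 0xfffffffe and seq1 = 5, seq1 - seq2 wraps to 7, and (int32_t)7 >= 0, so seqno 5 correctly counts as "after" 0xfffffffe. The comparison only misorders values about 2^31 apart. A standalone illustration:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t older = 0xfffffffeu, newer = 5;	/* wrapped past zero */

		assert((int32_t)(newer - older) >= 0);	/* 5 "passed" 0xfffffffe */
		assert((int32_t)(older - newer) < 0);	/* and not the other way */
		return 0;
	}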
@@ -1485,10 +1604,20 @@
 	}
 }

 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
-int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
+int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
+static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+{
+	return unlikely(atomic_read(&error->reset_counter)
+			& I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+{
+	return atomic_read(&error->reset_counter) == I915_WEDGED;
+}

 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
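These two inlines depend on the invariant called out in i915_gpu_error: the all-ones I915_WEDGED value still has the in-progress bit set, so a terminally wedged GPU also reads as "reset in progress" on every wait path. A quick standalone check of that invariant (illustrative):

	#include <assert.h>

	#define I915_RESET_IN_PROGRESS_FLAG	1
	#define I915_WEDGED			0xffffffff

	int main(void)
	{
		/* wedged must imply "reset in progress" for the wait paths */
		assert(I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG);
		return 0;
	}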
@@ -1527,9 +1656,9 @@
 				struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);

 uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
-				    uint32_t size,
-				    int tiling_mode);
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
+uint32_t
+i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
+			   int tiling_mode, bool fenced);
@@ -1550,10 +1680,9 @@
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);

 /* i915_gem_gtt.c */
-int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level);
@@ -1564,16 +1693,14 @@
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev,
-			      unsigned long start,
-			      unsigned long mappable_end,
-			      unsigned long end);
+void i915_gem_init_global_gtt(struct drm_device *dev);
+void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
+			       unsigned long mappable_end, unsigned long end);
 int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_gtt_fini(struct drm_device *dev);
 static inline void i915_gem_chipset_flush(struct drm_device *dev)
 {
 	if (INTEL_INFO(dev)->gen < 6)
 		intel_gtt_chipset_flush();
 }
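i915_gem_chipset_flush() captures the coherency split: gen6+ parts do not need it, while older chipsets must flush through the intel-gtt layer after CPU writes that the GPU will read. The typical call pattern looks like this (illustrative fragment, not verbatim driver code; `vaddr`, `data`, `len` assumed in scope):

	memcpy(vaddr, data, len);	/* CPU writes into a GEM object's pages */
	wmb();				/* order the writes before telling hw */
	i915_gem_chipset_flush(dev);	/* nop on gen6+, chipset flush before that */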
@@ -1587,10 +1714,23 @@
 					  bool nonblock);
 int i915_gem_evict_everything(struct drm_device *dev);

 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
+struct drm_i915_gem_object *
+i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

 /* i915_gem_tiling.c */
+inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+{
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+
+	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+		obj->tiling_mode != I915_TILING_NONE;
+}
+
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
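The new inline hoists a common test into the header: an object needs bit-17 fixups only when the board swizzles on physical address bit 17 (I915_BIT_6_SWIZZLE_9_10_17) and the object is tiled, because the CPU cannot reproduce that swizzle when pages move. A hedged sketch of the call pattern around CPU access (illustrative):

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);	/* fix up after pages come back */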
@@ -1615,9 +1755,9 @@

 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);

-/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+/* i915_ums.c */
+void i915_save_display_reg(struct drm_device *dev);
+void i915_restore_display_reg(struct drm_device *dev);

@@ -1674,10 +1814,11 @@
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 bool force_restore);
+extern void i915_redisable_vga(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
1746 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) |
1887 | #define I915_READ64(reg) i915_read64(dev_priv, (reg)) |
Line 1747... | Line 1888... | ||
1747 | 1888 | ||
1748 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
1889 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
Line -... | Line 1890... | ||
- | 1890 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
|
- | 1891 | ||
- | 1892 | /* "Broadcast RGB" property */ |
|
- | 1893 | #define INTEL_BROADCAST_RGB_AUTO 0 |
|
- | 1894 | #define INTEL_BROADCAST_RGB_FULL 1 |
|
- | 1895 | #define INTEL_BROADCAST_RGB_LIMITED 2 |
|
- | 1896 | ||
- | 1897 | static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) |
|
- | 1898 | { |
|
- | 1899 | if (HAS_PCH_SPLIT(dev)) |
|
- | 1900 | return CPU_VGACNTRL; |
|
- | 1901 | else if (IS_VALLEYVIEW(dev)) |
|
- | 1902 | return VLV_VGACNTRL; |
|
- | 1903 | else |
|
- | 1904 | return VGACNTRL; |
|
1749 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
1905 | } |
1750 | 1906 | ||
1751 | typedef struct |
1907 | typedef struct |
1752 | { |
1908 | { |
1753 | int width; |
1909 | int width; |