/drivers/video/drm/i915/Gtt/intel-gtt.c |
---|
38,6 → 38,10 |
#define PCI_VENDOR_ID_INTEL 0x8086 |
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582 |
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592 |
#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772 |
#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2 |
#define AGP_NORMAL_MEMORY 0 |
71,10 → 75,10 |
/* |
* If we have Intel graphics, we're not going to have anything other than |
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent |
* on the Intel IOMMU support (CONFIG_DMAR). |
* on the Intel IOMMU support (CONFIG_INTEL_IOMMU). |
* Only newer chipsets need to bother with this, of course. |
*/ |
#ifdef CONFIG_DMAR |
#ifdef CONFIG_INTEL_IOMMU |
#define USE_PCI_DMA_API 1 |
#else |
#define USE_PCI_DMA_API 0 |
440,7 → 444,7 |
FreeKernelSpace(intel_private.gtt); |
FreeKernelSpace(intel_private.registers); |
// intel_gtt_teardown_scratch_page(); |
intel_gtt_teardown_scratch_page(); |
} |
static int intel_gtt_init(void) |
469,7 → 473,8 |
if (HAS_PGTBL_EN) |
intel_private.PGETBL_save |= I810_PGETBL_ENABLED; |
dbgprintf("detected gtt size: %dK total, %dK mappable\n", |
dev_info(&intel_private.bridge_dev->dev, |
"detected gtt size: %dK total, %dK mappable\n", |
intel_private.base.gtt_total_entries * 4, |
intel_private.base.gtt_mappable_entries * 4); |
502,6 → 507,17 |
return 0; |
} |
/* Write one GTT PTE for i830-class (gen3) chipsets: mark the page valid |
 * and, when the caller asked for AGP_USER_CACHED_MEMORY, also set the |
 * system-cached (snooped) PTE bit. All other flag values map uncached. |
 */ |
static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
unsigned int flags) |
{ |
u32 pte_flags = I810_PTE_VALID; |
if (flags == AGP_USER_CACHED_MEMORY) |
pte_flags |= I830_PTE_SYSTEM_CACHED; |
writel(addr | pte_flags, intel_private.gtt + entry); |
} |
static bool intel_enable_gtt(void) |
{ |
u32 gma_addr; |
559,7 → 575,19 |
return true; |
} |
/* Validate a caller-supplied GTT memory-type flag. Only the four types |
 * this driver knows how to map are accepted: untyped (0), physical, |
 * user-cached and plain user memory. Anything else is rejected. |
 */ |
static bool i830_check_flags(unsigned int flags) |
{ |
switch (flags) { |
case 0: |
case AGP_PHYS_MEMORY: |
case AGP_USER_CACHED_MEMORY: |
case AGP_USER_MEMORY: |
return true; |
} |
/* Unknown/unsupported memory type. */ |
return false; |
} |
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, |
struct page **pages, unsigned int flags) |
{ |
585,7 → 613,6 |
readl(intel_private.gtt+i-1); |
} |
static void intel_i9xx_setup_flush(void) |
{ |
/* return if already configured */ |
595,25 → 622,34 |
if (INTEL_GTT_GEN == 6) |
return; |
#if 0 |
/* setup a resource for this object */ |
intel_private.ifp_resource.name = "Intel Flush Page"; |
intel_private.ifp_resource.flags = IORESOURCE_MEM; |
// intel_private.ifp_resource.name = "Intel Flush Page"; |
// intel_private.ifp_resource.flags = IORESOURCE_MEM; |
intel_private.resource_valid = 0; |
/* Setup chipset flush for 915 */ |
if (IS_G33 || INTEL_GTT_GEN >= 4) { |
intel_i965_g33_setup_chipset_flush(); |
} else { |
intel_i915_setup_chipset_flush(); |
} |
// if (IS_G33 || INTEL_GTT_GEN >= 4) { |
// intel_i965_g33_setup_chipset_flush(); |
// } else { |
// intel_i915_setup_chipset_flush(); |
// } |
if (intel_private.ifp_resource.start) |
intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
// if (intel_private.ifp_resource.start) |
// intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); |
if (!intel_private.i9xx_flush_page) |
dev_err(&intel_private.pcidev->dev, |
"can't ioremap flush page - no chipset flushing\n"); |
#endif |
} |
/* Undo intel_i9xx_setup_flush(): unmap the chipset flush page and reset |
 * the ifp_resource bookkeeping. Releasing the PCI resource itself is |
 * stubbed out in this port (see the commented release_resource call). |
 */ |
static void i9xx_cleanup(void) |
{ |
if (intel_private.i9xx_flush_page) |
iounmap(intel_private.i9xx_flush_page); |
// if (intel_private.resource_valid) |
// release_resource(&intel_private.ifp_resource); |
intel_private.ifp_resource.start = 0; |
intel_private.resource_valid = 0; |
} |
static void i9xx_chipset_flush(void) |
622,6 → 658,21 |
writel(1, intel_private.i9xx_flush_page); |
} |
/* Write one GTT PTE for gen4+ (965-class) chipsets. Same valid/cached |
 * flag handling as i830_write_entry, but these parts take a 36-bit |
 * address: bits 32-35 of the dma address are folded down into PTE |
 * bits 4-7 before the write. |
 */ |
static void i965_write_entry(dma_addr_t addr, |
unsigned int entry, |
unsigned int flags) |
{ |
u32 pte_flags; |
pte_flags = I810_PTE_VALID; |
if (flags == AGP_USER_CACHED_MEMORY) |
pte_flags |= I830_PTE_SYSTEM_CACHED; |
/* Shift high bits down: addr[35:32] -> pte[7:4]. */ |
addr |= (addr >> 28) & 0xf0; |
writel(addr | pte_flags, intel_private.gtt + entry); |
} |
static bool gen6_check_flags(unsigned int flags) |
{ |
return true; |
655,6 → 706,26 |
{ |
} |
/* Certain Gen5 chipsets require idling the GPU before |
* unmapping anything from the GTT when VT-d is enabled. |
*/ |
static inline int needs_idle_maps(void) |
{ |
#ifdef CONFIG_INTEL_IOMMU |
const unsigned short gpu_devid = intel_private.pcidev->device; |
extern int intel_iommu_gfx_mapped; |
/* Query intel_iommu to see if we need the workaround. Presumably that |
* was loaded first. |
*/ |
/* Only Ironlake mobile parts with VT-d graphics mapping active need it. */ |
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || |
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
intel_iommu_gfx_mapped) |
return 1; |
#endif |
/* No VT-d support compiled in, or not an affected chipset. */ |
return 0; |
} |
static int i9xx_setup(void) |
{ |
u32 reg_addr; |
690,11 → 761,74 |
intel_private.gtt_bus_addr = reg_addr + gtt_offset; |
} |
if (needs_idle_maps()) |
intel_private.base.do_idle_maps = 1; |
intel_i9xx_setup_flush(); |
return 0; |
} |
/* Per-generation GTT driver vtables. Each entry picks the hardware |
 * generation, the PTE write routine, the DMA mask width, and the |
 * shared i9xx setup/cleanup/flush hooks; the probe table below binds |
 * PCI device IDs to one of these. |
 */ |
static const struct intel_gtt_driver i915_gtt_driver = { |
.gen = 3, |
.has_pgtbl_enable = 1, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
/* i945 is the last gpu to need phys mem (for overlay and cursors). */ |
.write_entry = i830_write_entry, |
.dma_mask_size = 32, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver g33_gtt_driver = { |
.gen = 3, |
.is_g33 = 1, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
.write_entry = i965_write_entry, |
.dma_mask_size = 36, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver pineview_gtt_driver = { |
.gen = 3, |
.is_pineview = 1, .is_g33 = 1, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
.write_entry = i965_write_entry, |
.dma_mask_size = 36, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver i965_gtt_driver = { |
.gen = 4, |
.has_pgtbl_enable = 1, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
.write_entry = i965_write_entry, |
.dma_mask_size = 36, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver g4x_gtt_driver = { |
.gen = 5, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
.write_entry = i965_write_entry, |
.dma_mask_size = 36, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver ironlake_gtt_driver = { |
.gen = 5, |
.is_ironlake = 1, |
.setup = i9xx_setup, |
.cleanup = i9xx_cleanup, |
.write_entry = i965_write_entry, |
.dma_mask_size = 36, |
.check_flags = i830_check_flags, |
.chipset_flush = i9xx_chipset_flush, |
}; |
static const struct intel_gtt_driver sandybridge_gtt_driver = { |
.gen = 6, |
.setup = i9xx_setup, |
714,6 → 848,58 |
char *name; |
const struct intel_gtt_driver *gtt_driver; |
} intel_gtt_chipsets[] = { |
{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", |
&i915_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", |
&i965_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_G33_IG, "G33", |
&g33_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", |
&g33_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", |
&g33_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", |
&pineview_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", |
&pineview_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_B43_IG, "B43", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_G41_IG, "G41", |
&g4x_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, |
"HD Graphics", &ironlake_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, |
"HD Graphics", &ironlake_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, |
728,6 → 914,16 |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, |
"Sandybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG, |
"Ivybridge", &sandybridge_gtt_driver }, |
{ 0, NULL, NULL } |
}; |
789,11 → 985,13 |
return 1; |
} |
EXPORT_SYMBOL(intel_gmch_probe); |
/* Expose the probed GTT parameters (entry counts, stolen size, dmar and |
 * idle-map flags — see struct intel_gtt in intel-gtt.h) to the i915 core. |
 * Returns a pointer to driver-private state; callers must not free it. |
 */ |
const struct intel_gtt *intel_gtt_get(void) |
{ |
return &intel_private.base; |
} |
EXPORT_SYMBOL(intel_gtt_get); |
void intel_gtt_chipset_flush(void) |
{ |
800,6 → 998,7 |
if (intel_private.driver->chipset_flush) |
intel_private.driver->chipset_flush(); |
} |
EXPORT_SYMBOL(intel_gtt_chipset_flush); |
phys_addr_t get_bus_addr(void) |
/drivers/video/drm/i915/Gtt/intel-gtt.h |
---|
1,41 → 1,43 |
/* Common header for intel-gtt.ko and i915.ko */ |
#ifndef _DRM_INTEL_GTT_H |
#define _DRM_INTEL_GTT_H |
const struct intel_gtt { |
/* Size of memory reserved for graphics by the BIOS */ |
unsigned int stolen_size; |
/* Total number of gtt entries. */ |
unsigned int gtt_total_entries; |
/* Part of the gtt that is mappable by the cpu, for those chips where |
* this is not the full gtt. */ |
unsigned int gtt_mappable_entries; |
/* Whether i915 needs to use the dmar apis or not. */ |
unsigned int needs_dmar : 1; |
} *intel_gtt_get(void); |
void intel_gtt_chipset_flush(void); |
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg); |
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); |
int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, |
struct scatterlist **sg_list, int *num_sg); |
void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, |
unsigned int sg_len, |
unsigned int pg_start, |
unsigned int flags); |
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, |
struct page **pages, unsigned int flags); |
/* Special gtt memory types */ |
#define AGP_DCACHE_MEMORY 1 |
#define AGP_PHYS_MEMORY 2 |
/* New caching attributes for gen6/sandybridge */ |
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) |
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) |
/* flag for GFDT type */ |
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) |
#endif |
/* Common header for intel-gtt.ko and i915.ko */ |
#ifndef _DRM_INTEL_GTT_H |
#define _DRM_INTEL_GTT_H |
const struct intel_gtt { |
/* Size of memory reserved for graphics by the BIOS */ |
unsigned int stolen_size; |
/* Total number of gtt entries. */ |
unsigned int gtt_total_entries; |
/* Part of the gtt that is mappable by the cpu, for those chips where |
* this is not the full gtt. */ |
unsigned int gtt_mappable_entries; |
/* Whether i915 needs to use the dmar apis or not. */ |
unsigned int needs_dmar : 1; |
/* Whether we idle the gpu before mapping/unmapping */ |
unsigned int do_idle_maps : 1; |
} *intel_gtt_get(void); |
void intel_gtt_chipset_flush(void); |
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg); |
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); |
int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, |
struct scatterlist **sg_list, int *num_sg); |
void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, |
unsigned int sg_len, |
unsigned int pg_start, |
unsigned int flags); |
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, |
struct page **pages, unsigned int flags); |
/* Special gtt memory types */ |
#define AGP_DCACHE_MEMORY 1 |
#define AGP_PHYS_MEMORY 2 |
/* New caching attributes for gen6/sandybridge */ |
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) |
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) |
/* flag for GFDT type */ |
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) |
#endif |
/drivers/video/drm/i915/i915_dma.c |
---|
622,7 → 622,6 |
out_rmmap: |
pci_iounmap(dev->pdev, dev_priv->regs); |
put_bridge: |
// pci_dev_put(dev_priv->bridge_dev); |
free_priv: |
/drivers/video/drm/i915/i915_drv.c |
---|
1,4 → 1,3 |
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- |
*/ |
/* |
74,6 → 73,100 |
.subdevice = PCI_ANY_ID, \ |
.driver_data = (unsigned long) info } |
static const struct intel_device_info intel_i830_info = { |
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_845g_info = { |
.gen = 2, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_i85x_info = { |
.gen = 2, .is_i85x = 1, .is_mobile = 1, |
.cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_i865g_info = { |
.gen = 2, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_i915g_info = { |
.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_i915gm_info = { |
.gen = 3, .is_mobile = 1, |
.cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
.supports_tv = 1, |
}; |
static const struct intel_device_info intel_i945g_info = { |
.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
}; |
static const struct intel_device_info intel_i945gm_info = { |
.gen = 3, .is_i945gm = 1, .is_mobile = 1, |
.has_hotplug = 1, .cursor_needs_physical = 1, |
.has_overlay = 1, .overlay_needs_physical = 1, |
.supports_tv = 1, |
}; |
static const struct intel_device_info intel_i965g_info = { |
.gen = 4, .is_broadwater = 1, |
.has_hotplug = 1, |
.has_overlay = 1, |
}; |
static const struct intel_device_info intel_i965gm_info = { |
.gen = 4, .is_crestline = 1, |
.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, |
.has_overlay = 1, |
.supports_tv = 1, |
}; |
static const struct intel_device_info intel_g33_info = { |
.gen = 3, .is_g33 = 1, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_overlay = 1, |
}; |
static const struct intel_device_info intel_g45_info = { |
.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, |
.has_pipe_cxsr = 1, .has_hotplug = 1, |
.has_bsd_ring = 1, |
}; |
static const struct intel_device_info intel_gm45_info = { |
.gen = 4, .is_g4x = 1, |
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, |
.has_pipe_cxsr = 1, .has_hotplug = 1, |
.supports_tv = 1, |
.has_bsd_ring = 1, |
}; |
static const struct intel_device_info intel_pineview_info = { |
.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_overlay = 1, |
}; |
static const struct intel_device_info intel_ironlake_d_info = { |
.gen = 5, |
.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, |
.has_bsd_ring = 1, |
}; |
static const struct intel_device_info intel_ironlake_m_info = { |
.gen = 5, .is_mobile = 1, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_fbc = 1, |
.has_bsd_ring = 1, |
}; |
static const struct intel_device_info intel_sandybridge_d_info = { |
.gen = 6, |
.need_gfx_hws = 1, .has_hotplug = 1, |
89,8 → 182,48 |
.has_blt_ring = 1, |
}; |
/* Ivybridge (gen7) capability tables, matched by the pciidlist below. */ |
static const struct intel_device_info intel_ivybridge_d_info = { |
.is_ivybridge = 1, .gen = 7, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_bsd_ring = 1, |
.has_blt_ring = 1, |
}; |
static const struct intel_device_info intel_ivybridge_m_info = { |
.is_ivybridge = 1, .gen = 7, .is_mobile = 1, |
.need_gfx_hws = 1, .has_hotplug = 1, |
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ |
.has_bsd_ring = 1, |
.has_blt_ring = 1, |
}; |
static const struct pci_device_id pciidlist[] = { /* aka */ |
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ |
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ |
INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ |
INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ |
INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ |
INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ |
INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ |
INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ |
INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ |
INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ |
INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ |
INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ |
INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ |
INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ |
INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ |
INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ |
INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ |
INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ |
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ |
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ |
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ |
INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ |
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), |
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), |
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), |
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), |
INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), |
INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), |
INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), |
98,6 → 231,11 |
INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), |
INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), |
INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), |
INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ |
INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ |
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ |
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ |
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ |
{0, 0, 0} |
}; |
/drivers/video/drm/i915/intel_display.c |
---|
2181,9 → 2181,6 |
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
LEAVE_ATOMIC_MODE_SET); |
dbgprintf("set base atomic done ret= %d\n", ret); |
if (ret) { |
// i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
mutex_unlock(&dev->struct_mutex); |
7252,12 → 7249,26 |
} |
} |
/* Disable render C6 (rc6) power saving on Ironlake. If a power context |
 * is programmed (PWRCTXA non-zero): force the GPU awake via RCX_SW_EXIT, |
 * poll up to 50ms for the render standby status to report ON, clear the |
 * power context register, then restore RSTDBYCTL. Finally tears down |
 * the rc6 context object. |
 */ |
static void ironlake_disable_rc6(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (I915_READ(PWRCTXA)) { |
/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ |
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); |
wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), |
50); |
I915_WRITE(PWRCTXA, 0); |
/* POSTING_READ flushes the posted write before the next step. */ |
POSTING_READ(PWRCTXA); |
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
POSTING_READ(RSTDBYCTL); |
} |
ironlake_teardown_rc6(dev); |
} |
static int ironlake_setup_rc6(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
/drivers/video/drm/i915/intel_ringbuffer.c |
---|
208,8 → 208,6 |
return 0; |
} |
#if 0 |
/* |
* 965+ support PIPE_CONTROL commands, which provide finer grained control |
* over cache flushing. |
241,7 → 239,7 |
goto err; |
} |
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
// i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
ret = i915_gem_object_pin(obj, 4096, true); |
if (ret) |
248,7 → 246,7 |
goto err_unref; |
pc->gtt_offset = obj->gtt_offset; |
pc->cpu_page = kmap(obj->pages[0]); |
pc->cpu_page = (void*)MapIoMem(obj->pages[0], 4096, PG_SW); |
if (pc->cpu_page == NULL) |
goto err_unpin; |
257,9 → 255,9 |
return 0; |
err_unpin: |
i915_gem_object_unpin(obj); |
// i915_gem_object_unpin(obj); |
err_unref: |
drm_gem_object_unreference(&obj->base); |
// drm_gem_object_unreference(&obj->base); |
err: |
kfree(pc); |
return ret; |
275,16 → 273,14 |
return; |
obj = pc->obj; |
kunmap(obj->pages[0]); |
i915_gem_object_unpin(obj); |
drm_gem_object_unreference(&obj->base); |
// kunmap(obj->pages[0]); |
// i915_gem_object_unpin(obj); |
// drm_gem_object_unreference(&obj->base); |
kfree(pc); |
ring->private = NULL; |
} |
#endif |
static int init_render_ring(struct intel_ring_buffer *ring) |
{ |
struct drm_device *dev = ring->dev; |
307,7 → 303,7 |
if (INTEL_INFO(dev)->gen >= 6) { |
} else if (IS_GEN5(dev)) { |
// ret = init_pipe_control(ring); |
ret = init_pipe_control(ring); |
if (ret) |
return ret; |
} |
317,8 → 313,6 |
return ret; |
} |
#if 0 |
static void render_ring_cleanup(struct intel_ring_buffer *ring) |
{ |
if (!ring->private) |
527,6 → 521,7 |
POSTING_READ(IMR); |
} |
#if 0 |
static bool |
render_ring_get_irq(struct intel_ring_buffer *ring) |
{ |
616,8 → 611,6 |
return 0; |
} |
#if 0 |
static int |
ring_add_request(struct intel_ring_buffer *ring, |
u32 *result) |
641,6 → 634,8 |
return 0; |
} |
#if 0 |
static bool |
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag) |
{ |
777,8 → 772,8 |
return; |
kunmap(obj->pages[0]); |
i915_gem_object_unpin(obj); |
drm_gem_object_unreference(&obj->base); |
// i915_gem_object_unpin(obj); |
// drm_gem_object_unreference(&obj->base); |
ring->status_page.obj = NULL; |
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); |
798,7 → 793,7 |
goto err; |
} |
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
// i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); |
ret = i915_gem_object_pin(obj, 4096, true); |
if (ret != 0) { |
907,7 → 902,6 |
return ret; |
} |
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) |
{ |
struct drm_i915_private *dev_priv; |
937,7 → 931,6 |
// cleanup_status_page(ring); |
} |
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) |
{ |
unsigned int *virt; |
1036,7 → 1029,6 |
ring->write_tail(ring, ring->tail); |
} |
static const struct intel_ring_buffer render_ring = { |
.name = "render ring", |
.id = RING_RENDER, |
1045,7 → 1037,7 |
.init = init_render_ring, |
.write_tail = ring_write_tail, |
.flush = render_ring_flush, |
// .add_request = render_ring_add_request, |
.add_request = render_ring_add_request, |
// .get_seqno = ring_get_seqno, |
// .irq_get = render_ring_get_irq, |
// .irq_put = render_ring_put_irq, |
1063,7 → 1055,7 |
.init = init_ring_common, |
.write_tail = ring_write_tail, |
.flush = bsd_ring_flush, |
// .add_request = ring_add_request, |
.add_request = ring_add_request, |
// .get_seqno = ring_get_seqno, |
// .irq_get = bsd_ring_get_irq, |
// .irq_put = bsd_ring_put_irq, |
1093,7 → 1085,6 |
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); |
} |
static int gen6_ring_flush(struct intel_ring_buffer *ring, |
u32 invalidate, u32 flush) |
{ |
1177,7 → 1168,7 |
.init = init_ring_common, |
.write_tail = gen6_bsd_ring_write_tail, |
.flush = gen6_ring_flush, |
// .add_request = gen6_add_request, |
.add_request = gen6_add_request, |
// .get_seqno = ring_get_seqno, |
// .irq_get = gen6_bsd_ring_get_irq, |
// .irq_put = gen6_bsd_ring_put_irq, |
1219,7 → 1210,6 |
return ring->private; |
} |
static int blt_ring_init(struct intel_ring_buffer *ring) |
{ |
if (NEED_BLT_WORKAROUND(ring->dev)) { |
1302,7 → 1292,6 |
ring->private = NULL; |
} |
static const struct intel_ring_buffer gen6_blt_ring = { |
.name = "blt ring", |
.id = RING_BLT, |
1311,7 → 1300,7 |
.init = blt_ring_init, |
.write_tail = ring_write_tail, |
.flush = blt_ring_flush, |
// .add_request = gen6_add_request, |
.add_request = gen6_add_request, |
// .get_seqno = ring_get_seqno, |
// .irq_get = blt_ring_get_irq, |
// .irq_put = blt_ring_put_irq, |
1319,8 → 1308,6 |
// .cleanup = blt_ring_cleanup, |
}; |
int intel_init_render_ring_buffer(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
1328,11 → 1315,11 |
ENTER(); |
*ring = render_ring; |
if (INTEL_INFO(dev)->gen >= 6) { |
// ring->add_request = gen6_add_request; |
ring->add_request = gen6_add_request; |
// ring->irq_get = gen6_render_ring_get_irq; |
// ring->irq_put = gen6_render_ring_put_irq; |
} else if (IS_GEN5(dev)) { |
// ring->add_request = pc_render_add_request; |
ring->add_request = pc_render_add_request; |
// ring->get_seqno = pc_render_get_seqno; |
} |