/drivers/video/drm/i915/Gtt/intel-gtt.c |
---|
35,9 → 35,6 |
static bool intel_enable_gtt(void); |
#define PG_SW 0x003 |
#define PG_NOCACHE 0x018 |
#define PCI_VENDOR_ID_INTEL 0x8086 |
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560 |
49,43 → 46,7 |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
static inline uint8_t __raw_readb(const volatile void __iomem *addr) |
{ |
return *(const volatile uint8_t __force *) addr; |
} |
static inline uint16_t __raw_readw(const volatile void __iomem *addr) |
{ |
return *(const volatile uint16_t __force *) addr; |
} |
static inline uint32_t __raw_readl(const volatile void __iomem *addr) |
{ |
return *(const volatile uint32_t __force *) addr; |
} |
#define readb __raw_readb |
#define readw __raw_readw |
#define readl __raw_readl |
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr) |
{ *(volatile uint8_t __force *) addr = b;} |
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr) |
{ *(volatile uint16_t __force *) addr = b;} |
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr) |
{ *(volatile uint32_t __force *) addr = b;} |
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr) |
{ *(volatile __u64 __force *) addr = b;} |
#define writeb __raw_writeb |
#define writew __raw_writew |
#define writel __raw_writel |
#define writeq __raw_writeq |
static inline int pci_read_config_word(struct pci_dev *dev, int where, |
u16 *val) |
{ |
800,3 → 761,8 |
return 1; |
} |
const struct intel_gtt *intel_gtt_get(void) |
{ |
return &intel_private.base; |
} |
/drivers/video/drm/i915/i915_dma.c |
---|
0,0 → 1,441 |
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- |
*/ |
/* |
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#include "drmP.h" |
#include "drm.h" |
#include "drm_crtc_helper.h" |
#include "drm_fb_helper.h" |
#include "intel_drv.h" |
//#include "i915_drm.h" |
#include "i915_drv.h" |
#include <drm/intel-gtt.h> |
//#include "i915_trace.h" |
//#include "../../../platform/x86/intel_ips.h" |
#include <linux/pci.h> |
//#include <linux/vgaarb.h> |
//#include <linux/acpi.h> |
//#include <linux/pnp.h> |
//#include <linux/vga_switcheroo.h> |
//#include <linux/slab.h> |
//#include <acpi/video.h> |
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen); |
static void i915_write_hws_pga(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
u32 addr; |
addr = dev_priv->status_page_dmah->busaddr; |
if (INTEL_INFO(dev)->gen >= 4) |
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; |
I915_WRITE(HWS_PGA, addr); |
} |
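/* A worked example of the gen4+ packing above, using a hypothetical |
 * 36-bit bus address busaddr = 0x32345F000: the low 32 bits carry |
 * the page address, and (busaddr >> 28) & 0xf0 = 0x30 moves address |
 * bits [35:32] into register bits [7:4], per the shift and mask used |
 * in i915_write_hws_pga(). |
 */ |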
/** |
* Sets up the hardware status page for devices that need a physical address |
* in the register. |
*/ |
static int i915_init_phys_hws(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
/* Program Hardware Status Page */ |
dev_priv->status_page_dmah = |
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); |
if (!dev_priv->status_page_dmah) { |
DRM_ERROR("Can not allocate hardware status page\n"); |
return -ENOMEM; |
} |
i915_write_hws_pga(dev); |
dbgprintf("Enabled hardware status page\n"); |
return 0; |
} |
static void i915_pineview_get_mem_freq(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
u32 tmp; |
tmp = I915_READ(CLKCFG); |
switch (tmp & CLKCFG_FSB_MASK) { |
case CLKCFG_FSB_533: |
dev_priv->fsb_freq = 533; /* 133*4 */ |
break; |
case CLKCFG_FSB_800: |
dev_priv->fsb_freq = 800; /* 200*4 */ |
break; |
case CLKCFG_FSB_667: |
dev_priv->fsb_freq = 667; /* 167*4 */ |
break; |
case CLKCFG_FSB_400: |
dev_priv->fsb_freq = 400; /* 100*4 */ |
break; |
} |
switch (tmp & CLKCFG_MEM_MASK) { |
case CLKCFG_MEM_533: |
dev_priv->mem_freq = 533; |
break; |
case CLKCFG_MEM_667: |
dev_priv->mem_freq = 667; |
break; |
case CLKCFG_MEM_800: |
dev_priv->mem_freq = 800; |
break; |
} |
/* detect pineview DDR3 setting */ |
tmp = I915_READ(CSHRDDR3CTL); |
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; |
} |
static void i915_ironlake_get_mem_freq(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
u16 ddrpll, csipll; |
ddrpll = I915_READ16(DDRMPLL1); |
csipll = I915_READ16(CSIPLL0); |
switch (ddrpll & 0xff) { |
case 0xc: |
dev_priv->mem_freq = 800; |
break; |
case 0x10: |
dev_priv->mem_freq = 1066; |
break; |
case 0x14: |
dev_priv->mem_freq = 1333; |
break; |
case 0x18: |
dev_priv->mem_freq = 1600; |
break; |
default: |
DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", |
ddrpll & 0xff); |
dev_priv->mem_freq = 0; |
break; |
} |
dev_priv->r_t = dev_priv->mem_freq; |
switch (csipll & 0x3ff) { |
case 0x00c: |
dev_priv->fsb_freq = 3200; |
break; |
case 0x00e: |
dev_priv->fsb_freq = 3733; |
break; |
case 0x010: |
dev_priv->fsb_freq = 4266; |
break; |
case 0x012: |
dev_priv->fsb_freq = 4800; |
break; |
case 0x014: |
dev_priv->fsb_freq = 5333; |
break; |
case 0x016: |
dev_priv->fsb_freq = 5866; |
break; |
case 0x018: |
dev_priv->fsb_freq = 6400; |
break; |
default: |
DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", |
csipll & 0x3ff); |
dev_priv->fsb_freq = 0; |
break; |
} |
if (dev_priv->fsb_freq == 3200) { |
dev_priv->c_m = 0; |
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { |
dev_priv->c_m = 1; |
} else { |
dev_priv->c_m = 2; |
} |
} |
static int i915_get_bridge_dev(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); |
if (!dev_priv->bridge_dev) { |
DRM_ERROR("bridge device not found\n"); |
return -1; |
} |
return 0; |
} |
/* Global for IPS driver to get at the current i915 device */ |
static struct drm_i915_private *i915_mch_dev; |
/* |
* Lock protecting IPS related data structures |
* - i915_mch_dev |
* - dev_priv->max_delay |
* - dev_priv->min_delay |
* - dev_priv->fmax |
* - dev_priv->gpu_busy |
*/ |
static DEFINE_SPINLOCK(mchdev_lock); |
/** |
* i915_driver_load - setup chip and create an initial config |
* @dev: DRM device |
* @flags: startup flags |
* |
* The driver load routine has to do several things: |
* - drive output discovery via intel_modeset_init() |
* - initialize the memory manager |
* - allocate initial config memory |
* - setup the DRM framebuffer with the allocated memory |
*/ |
int i915_driver_load(struct drm_device *dev, unsigned long flags) |
{ |
struct drm_i915_private *dev_priv; |
int ret = 0, mmio_bar; |
uint32_t agp_size; |
ENTER(); |
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL); |
if (dev_priv == NULL) |
return -ENOMEM; |
dev->dev_private = (void *)dev_priv; |
dev_priv->dev = dev; |
dev_priv->info = (struct intel_device_info *) flags; |
if (i915_get_bridge_dev(dev)) { |
ret = -EIO; |
goto free_priv; |
} |
/* overlay on gen2 is broken and can't address above 1G */ |
// if (IS_GEN2(dev)) |
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
/* 965GM sometimes incorrectly writes to hardware status page (HWS) |
* using 32bit addressing, overwriting memory if HWS is located |
* above 4GB. |
* |
* The documentation also mentions an issue with undefined |
* behaviour if any general state is accessed within a page above 4GB, |
* which also needs to be handled carefully. |
*/ |
// if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); |
mmio_bar = IS_GEN2(dev) ? 1 : 0; |
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
if (!dev_priv->regs) { |
DRM_ERROR("failed to map registers\n"); |
ret = -EIO; |
goto put_bridge; |
} |
dev_priv->mm.gtt = intel_gtt_get(); |
if (!dev_priv->mm.gtt) { |
DRM_ERROR("Failed to initialize GTT\n"); |
ret = -ENODEV; |
goto out_rmmap; |
} |
// agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
/* agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; */ |
// dev_priv->mm.gtt_mapping = |
// io_mapping_create_wc(dev->agp->base, agp_size); |
// if (dev_priv->mm.gtt_mapping == NULL) { |
// ret = -EIO; |
// goto out_rmmap; |
// } |
/* Set up a WC MTRR for non-PAT systems. This is more common than |
* one would think: the kernel disables PAT on first generation |
* Core chips because WC PAT would be overridden by a UC MTRR if |
* one were present, and it disables PAT even when no UC MTRR is |
* actually there. |
*/ |
// dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, |
// agp_size, |
// MTRR_TYPE_WRCOMB, 1); |
// if (dev_priv->mm.gtt_mtrr < 0) { |
// DRM_INFO("MTRR allocation failed. Graphics " |
// "performance may suffer.\n"); |
// } |
/* The i915 workqueue is primarily used for batched retirement of |
* requests (and thus managing bo) once the task has been completed |
* by the GPU. i915_gem_retire_requests() is called directly when we |
* need high-priority retirement, such as waiting for an explicit |
* bo. |
* |
* It is also used for periodic low-priority events, such as |
* idle-timers and recording error state. |
* |
* All tasks on the workqueue are expected to acquire the dev mutex |
* so there is no point in running more than one instance of the |
* workqueue at any time: max_active = 1 and NON_REENTRANT. |
*/ |
// dev_priv->wq = alloc_workqueue("i915", |
// WQ_UNBOUND | WQ_NON_REENTRANT, |
// 1); |
// if (dev_priv->wq == NULL) { |
// DRM_ERROR("Failed to create our workqueue.\n"); |
// ret = -ENOMEM; |
// goto out_mtrrfree; |
// } |
/* enable GEM by default */ |
dev_priv->has_gem = 1; |
// intel_irq_init(dev); |
/* Try to make sure MCHBAR is enabled before poking at it */ |
// intel_setup_mchbar(dev); |
intel_setup_gmbus(dev); |
// intel_opregion_setup(dev); |
/* Make sure the bios did its job and set up vital registers */ |
// intel_setup_bios(dev); |
i915_gem_load(dev); |
/* Init HWS */ |
if (!I915_NEED_GFX_HWS(dev)) { |
ret = i915_init_phys_hws(dev); |
if (ret) |
goto out_gem_unload; |
} |
if (IS_PINEVIEW(dev)) |
i915_pineview_get_mem_freq(dev); |
else if (IS_GEN5(dev)) |
i915_ironlake_get_mem_freq(dev); |
/* On the 945G/GM, the chipset reports the MSI capability on the |
* integrated graphics even though the support isn't actually there |
* according to the published specs. It doesn't appear to function |
* correctly in testing on 945G. |
* This may be a side effect of MSI having been made available for PEG |
* and the registers being closely associated. |
* |
* According to chipset errata, on the 965GM, MSI interrupts may |
* be lost or delayed, but we use them anyway to avoid |
* stuck interrupts on some machines. |
*/ |
// if (!IS_I945G(dev) && !IS_I945GM(dev)) |
// pci_enable_msi(dev->pdev); |
spin_lock_init(&dev_priv->irq_lock); |
spin_lock_init(&dev_priv->error_lock); |
spin_lock_init(&dev_priv->rps_lock); |
if (IS_MOBILE(dev) || !IS_GEN2(dev)) |
dev_priv->num_pipe = 2; |
else |
dev_priv->num_pipe = 1; |
// ret = drm_vblank_init(dev, dev_priv->num_pipe); |
// if (ret) |
// goto out_gem_unload; |
/* Start out suspended */ |
dev_priv->mm.suspended = 1; |
intel_detect_pch(dev); |
// if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
// ret = i915_load_modeset_init(dev); |
// if (ret < 0) { |
// DRM_ERROR("failed to init modeset\n"); |
// goto out_gem_unload; |
// } |
// } |
/* Must be done after probing outputs */ |
// intel_opregion_init(dev); |
// acpi_video_register(); |
// setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, |
// (unsigned long) dev); |
spin_lock(&mchdev_lock); |
i915_mch_dev = dev_priv; |
dev_priv->mchdev_lock = &mchdev_lock; |
spin_unlock(&mchdev_lock); |
// ips_ping_for_i915_load(); |
LEAVE(); |
return 0; |
out_gem_unload: |
// if (dev_priv->mm.inactive_shrinker.shrink) |
// unregister_shrinker(&dev_priv->mm.inactive_shrinker); |
// if (dev->pdev->msi_enabled) |
// pci_disable_msi(dev->pdev); |
// intel_teardown_gmbus(dev); |
// intel_teardown_mchbar(dev); |
// destroy_workqueue(dev_priv->wq); |
out_mtrrfree: |
// if (dev_priv->mm.gtt_mtrr >= 0) { |
// mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base, |
// dev->agp->agp_info.aper_size * 1024 * 1024); |
// dev_priv->mm.gtt_mtrr = -1; |
// } |
// io_mapping_free(dev_priv->mm.gtt_mapping); |
out_rmmap: |
pci_iounmap(dev->pdev, dev_priv->regs); |
put_bridge: |
// pci_dev_put(dev_priv->bridge_dev); |
free_priv: |
kfree(dev_priv); |
return ret; |
} |
/drivers/video/drm/i915/i915_drv.c |
---|
37,18 → 37,11 |
#include <errno-base.h> |
#include <linux/pci.h> |
enum { |
RCS = 0x0, |
VCS, |
BCS, |
I915_NUM_RINGS, |
}; |
#include "i915_drv.h" |
#include <syscall.h> |
#define PCI_VENDOR_ID_INTEL 0x8086 |
#define INTEL_VGA_DEVICE(id, info) { \ |
.class = PCI_CLASS_DISPLAY_VGA << 8, \ |
.class_mask = 0xff0000, \ |
88,7 → 81,110 |
{0, 0, 0} |
}; |
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 |
void intel_detect_pch (struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct pci_dev *pch; |
/* |
* We probe the ISA bridge instead of Dev31:Fun0 because it makes |
* graphics device passthrough easier for the VMM, which then only |
* needs to expose the ISA bridge for the driver to identify the |
* real hardware underneath. This is a requirement from the |
* virtualization team. |
*/ |
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); |
if (pch) { |
if (pch->vendor == PCI_VENDOR_ID_INTEL) { |
int id; |
id = pch->device & INTEL_PCH_DEVICE_ID_MASK; |
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_IBX; |
DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); |
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { |
dev_priv->pch_type = PCH_CPT; |
DRM_DEBUG_KMS("Found CougarPoint PCH\n"); |
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { |
/* PantherPoint is CPT compatible */ |
dev_priv->pch_type = PCH_CPT; |
DRM_DEBUG_KMS("Found PatherPoint PCH\n"); |
} |
} |
} |
} |
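/* Worked example of the masking above, with an illustrative device |
 * id: an Intel ISA bridge with PCI device id 0x3b09 gives |
 * 0x3b09 & INTEL_PCH_DEVICE_ID_MASK = 0x3b00, which equals |
 * INTEL_PCH_IBX_DEVICE_ID_TYPE, so pch_type is set to PCH_IBX. |
 */ |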
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
{ |
int count; |
count = 0; |
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) |
udelay(10); |
I915_WRITE_NOTRACE(FORCEWAKE, 1); |
POSTING_READ(FORCEWAKE); |
count = 0; |
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) |
udelay(10); |
} |
/* |
* Generally this is called implicitly by the register read function. However, |
* if some sequence requires the GT to not power down then this function should |
* be called at the beginning of the sequence followed by a call to |
* gen6_gt_force_wake_put() at the end of the sequence. |
*/ |
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
{ |
// WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
/* Forcewake is atomic in case we get in here without the lock */ |
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1) |
__gen6_gt_force_wake_get(dev_priv); |
} |
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
{ |
I915_WRITE_NOTRACE(FORCEWAKE, 0); |
POSTING_READ(FORCEWAKE); |
} |
/* |
* see gen6_gt_force_wake_get() |
*/ |
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
{ |
// WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); |
if (atomic_dec_and_test(&dev_priv->forcewake_count)) |
__gen6_gt_force_wake_put(dev_priv); |
} |
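/* A minimal usage sketch of the get/put pattern described above, |
 * assuming a gen6 dev_priv; reg_a and reg_b stand for any two GT |
 * registers that must be read while the GT stays powered up. |
 */ |
static inline u32 gen6_read_gt_pair(struct drm_i915_private *dev_priv, |
				    u32 reg_a, u32 reg_b, u32 *second) |
{ |
	u32 first; |
	gen6_gt_force_wake_get(dev_priv);	/* keep the GT awake...    */ |
	first = I915_READ(reg_a);		/* ...across both reads... */ |
	*second = I915_READ(reg_b); |
	gen6_gt_force_wake_put(dev_priv);	/* ...then drop the ref.   */ |
	return first; |
} |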
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) |
{ |
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { |
int loop = 500; |
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { |
udelay(10); |
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); |
} |
// WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES); |
dev_priv->gt_fifo_count = fifo; |
} |
dev_priv->gt_fifo_count--; |
} |
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent); |
int i915_init(void) |
156,7 → 252,7 |
//int i915_driver_load(struct drm_device *dev, unsigned long flags) |
// ret = radeon_driver_load_kms(dev, ent->driver_data ); |
ret = i915_driver_load(dev, ent->driver_data ); |
// if (ret) |
// goto err_g4; |
/drivers/video/drm/i915/i915_drv.h |
---|
32,7 → 32,7 |
#include "i915_reg.h" |
//#include "intel_bios.h" |
//#include "intel_ringbuffer.h" |
#include "intel_ringbuffer.h" |
//#include <linux/io-mapping.h> |
//#include <linux/i2c.h> |
//#include <drm/intel-gtt.h> |
274,17 → 274,17 |
void __iomem *regs; |
u32 gt_fifo_count; |
// struct intel_gmbus { |
// struct i2c_adapter adapter; |
// struct i2c_adapter *force_bit; |
// u32 reg0; |
// } *gmbus; |
struct intel_gmbus { |
struct i2c_adapter adapter; |
struct i2c_adapter *force_bit; |
u32 reg0; |
} *gmbus; |
struct pci_dev *bridge_dev; |
// struct intel_ring_buffer ring[I915_NUM_RINGS]; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t next_seqno; |
// drm_dma_handle_t *status_page_dmah; |
drm_dma_handle_t *status_page_dmah; |
// uint32_t counter; |
// drm_local_map_t hws_map; |
// struct drm_i915_gem_object *pwrctx; |
367,7 → 367,7 |
// struct notifier_block lid_notifier; |
int crt_ddc_pin; |
// struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ |
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ |
int num_fence_regs; /* 8 on pre-965, 16 otherwise */ |
667,8 → 667,8 |
u32 pch_pf_pos, pch_pf_size; |
int panel_t3, panel_t12; |
// struct drm_crtc *plane_to_crtc_mapping[2]; |
// struct drm_crtc *pipe_to_crtc_mapping[2]; |
struct drm_crtc *plane_to_crtc_mapping[2]; |
struct drm_crtc *pipe_to_crtc_mapping[2]; |
// wait_queue_head_t pending_flip_queue; |
bool flip_pending_is_done; |
1209,7 → 1209,7 |
} else { \ |
val = read##y(dev_priv->regs + reg); \ |
} \ |
trace_i915_reg_rw(false, reg, val, sizeof(val)); \ |
/* trace_i915_reg_rw(false, reg, val, sizeof(val)); */\ |
return val; \ |
} |
1221,7 → 1221,7 |
#define __i915_write(x, y) \ |
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ |
trace_i915_reg_rw(true, reg, val, sizeof(val)); \ |
/* trace_i915_reg_rw(true, reg, val, sizeof(val));*/ \ |
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
__gen6_gt_wait_for_fifo(dev_priv); \ |
} \ |
/drivers/video/drm/i915/i915_gem.c |
---|
0,0 → 1,148 |
/* |
* Copyright © 2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
* |
* Authors: |
* Eric Anholt <eric@anholt.net> |
* |
*/ |
#include "drmP.h" |
#include "drm.h" |
//#include "i915_drm.h" |
#include "i915_drv.h" |
//#include "i915_trace.h" |
#include "intel_drv.h" |
//#include <linux/shmem_fs.h> |
//#include <linux/slab.h> |
//#include <linux/swap.h> |
#include <linux/pci.h> |
#define I915_EXEC_CONSTANTS_MASK (3<<6) |
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ |
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) |
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ |
/** |
* i915_gem_clear_fence_reg - clear out fence register info |
* @obj: object to clear |
* |
* Zeroes out the fence register itself and clears out the associated |
* data structures in dev_priv and obj. |
*/ |
static void |
i915_gem_clear_fence_reg(struct drm_device *dev, |
struct drm_i915_fence_reg *reg) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t fence_reg = reg - dev_priv->fence_regs; |
switch (INTEL_INFO(dev)->gen) { |
case 7: |
case 6: |
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); |
break; |
case 5: |
case 4: |
I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); |
break; |
case 3: |
if (fence_reg >= 8) |
fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; |
else |
case 2: |
fence_reg = FENCE_REG_830_0 + fence_reg * 4; |
I915_WRITE(fence_reg, 0); |
break; |
} |
list_del_init(®->lru_list); |
reg->obj = NULL; |
reg->setup_seqno = 0; |
} |
static void |
init_ring_lists(struct intel_ring_buffer *ring) |
{ |
INIT_LIST_HEAD(&ring->active_list); |
INIT_LIST_HEAD(&ring->request_list); |
INIT_LIST_HEAD(&ring->gpu_write_list); |
} |
void |
i915_gem_load(struct drm_device *dev) |
{ |
int i; |
drm_i915_private_t *dev_priv = dev->dev_private; |
INIT_LIST_HEAD(&dev_priv->mm.active_list); |
INIT_LIST_HEAD(&dev_priv->mm.flushing_list); |
INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
INIT_LIST_HEAD(&dev_priv->mm.pinned_list); |
INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); |
INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
for (i = 0; i < I915_NUM_RINGS; i++) |
init_ring_lists(&dev_priv->ring[i]); |
for (i = 0; i < 16; i++) |
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
// INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
// i915_gem_retire_work_handler); |
// init_completion(&dev_priv->error_completion); |
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */ |
if (IS_GEN3(dev)) { |
u32 tmp = I915_READ(MI_ARB_STATE); |
if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) { |
/* arb state is a masked write, so set bit + bit in mask */ |
tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT); |
I915_WRITE(MI_ARB_STATE, tmp); |
} |
} |
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; |
if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
dev_priv->num_fence_regs = 16; |
else |
dev_priv->num_fence_regs = 8; |
/* Initialize fence registers to zero */ |
for (i = 0; i < dev_priv->num_fence_regs; i++) { |
i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]); |
} |
i915_gem_detect_bit_6_swizzle(dev); |
// init_waitqueue_head(&dev_priv->pending_flip_queue); |
dev_priv->mm.interruptible = true; |
// dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; |
// dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; |
// register_shrinker(&dev_priv->mm.inactive_shrinker); |
} |
/drivers/video/drm/i915/i915_gem_tiling.c |
---|
0,0 → 1,512 |
/* |
* Copyright © 2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
* |
* Authors: |
* Eric Anholt <eric@anholt.net> |
* |
*/ |
#include "linux/string.h" |
#include "linux/bitops.h" |
#include "drmP.h" |
#include "drm.h" |
//#include "i915_drm.h" |
#include "i915_drv.h" |
/** @file i915_gem_tiling.c |
* |
* Support for managing tiling state of buffer objects. |
* |
* The idea behind tiling is to increase cache hit rates by rearranging |
* pixel data so that a group of pixel accesses are in the same cacheline. |
* Performance improvement from doing this on the back/depth buffer is on |
* the order of 30%. |
* |
* Intel architectures make this somewhat more complicated, though, by |
* adjustments made to addressing of data when the memory is in interleaved |
* mode (matched pairs of DIMMs) to improve memory bandwidth. |
* For interleaved memory, the CPU sends every sequential 64 bytes |
* to an alternate memory channel so it can get the bandwidth from both. |
* |
* The GPU also rearranges its accesses for increased bandwidth to interleaved |
* memory, and it matches what the CPU does for non-tiled. However, when tiled |
* it does it a little differently, since one walks addresses not just in the |
* X direction but also Y. So, along with alternating channels when bit |
* 6 of the address flips, it also alternates when other bits flip -- Bits 9 |
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) |
* are common to both the 915 and 965-class hardware. |
* |
* The CPU also sometimes XORs in higher bits as well, to improve |
* bandwidth doing strided access like we do so frequently in graphics. This |
* is called "Channel XOR Randomization" in the MCH documentation. The result |
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address |
* decode. |
* |
* All of this bit 6 XORing has an effect on our memory management, |
* as we need to make sure that the 3d driver can correctly address object |
* contents. |
* |
* If we don't have interleaved memory, all tiling is safe and no swizzling is |
* required. |
* |
* When bit 17 is XORed in, we simply refuse to tile at all. Bit |
* 17 is not just a page offset, so as we page an object out and back in, |
* individual pages in it will have different bit 17 addresses, resulting in |
* each 64 bytes being swapped with its neighbor! |
* |
* Otherwise, if interleaved, we have to tell the 3d driver what the address |
* swizzling it needs to do is, since it's writing with the CPU to the pages |
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the |
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling |
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order |
* to match what the GPU expects. |
*/ |
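/* An illustrative (non-driver) sketch of the bit 6 XOR described |
 * above: under the common 9/10 swizzle, the effective bit 6 of an |
 * address flips whenever bit 9 XOR bit 10 of that address is set. |
 */ |
static inline unsigned long swizzle_addr_9_10(unsigned long addr) |
{ |
	unsigned long bit9  = (addr >> 9) & 1;	/* X tile scanline bit */ |
	unsigned long bit10 = (addr >> 10) & 1;	/* two-scanline bit    */ |
	return addr ^ ((bit9 ^ bit10) << 6);	/* toggle 64-byte half */ |
} |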
#define I915_TILING_NONE 0 |
#define I915_TILING_X 1 |
#define I915_TILING_Y 2 |
#define I915_BIT_6_SWIZZLE_NONE 0 |
#define I915_BIT_6_SWIZZLE_9 1 |
#define I915_BIT_6_SWIZZLE_9_10 2 |
#define I915_BIT_6_SWIZZLE_9_11 3 |
#define I915_BIT_6_SWIZZLE_9_10_11 4 |
/* Not seen by userland */ |
#define I915_BIT_6_SWIZZLE_UNKNOWN 5 |
/* Seen by userland. */ |
#define I915_BIT_6_SWIZZLE_9_17 6 |
#define I915_BIT_6_SWIZZLE_9_10_17 7 |
/** |
* Detects bit 6 swizzling of address lookup between IGD access and CPU |
* access through main memory. |
*/ |
void |
i915_gem_detect_bit_6_swizzle(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
if (INTEL_INFO(dev)->gen >= 5) { |
/* On Ironlake, whatever the DRAM config, the GPU always uses |
* the same swizzling setup. |
*/ |
swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
swizzle_y = I915_BIT_6_SWIZZLE_9; |
} else if (IS_GEN2(dev)) { |
/* As far as we know, the 865 doesn't have these bit 6 |
* swizzling issues. |
*/ |
swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
} else if (IS_MOBILE(dev)) { |
uint32_t dcc; |
/* On mobile 9xx chipsets, channel interleave by the CPU is |
* determined by DCC. For single-channel, neither the CPU |
* nor the GPU do swizzling. For dual channel interleaved, |
* the GPU's interleave is bit 9 and 10 for X tiled, and bit |
* 9 for Y tiled. The CPU's interleave is independent, and |
* can be based on either bit 11 (haven't seen this yet) or |
* bit 17 (common). |
*/ |
dcc = I915_READ(DCC); |
switch (dcc & DCC_ADDRESSING_MODE_MASK) { |
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: |
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: |
swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
break; |
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: |
if (dcc & DCC_CHANNEL_XOR_DISABLE) { |
/* This is the base swizzling by the GPU for |
* tiled buffers. |
*/ |
swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
swizzle_y = I915_BIT_6_SWIZZLE_9; |
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) { |
/* Bit 11 swizzling by the CPU in addition. */ |
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; |
swizzle_y = I915_BIT_6_SWIZZLE_9_11; |
} else { |
/* Bit 17 swizzling by the CPU in addition. */ |
swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; |
swizzle_y = I915_BIT_6_SWIZZLE_9_17; |
} |
break; |
} |
if (dcc == 0xffffffff) { |
DRM_ERROR("Couldn't read from MCHBAR. " |
"Disabling tiling.\n"); |
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
} |
} else { |
/* The 965, G33, and newer, have a very flexible memory |
* configuration. It will enable dual-channel mode |
* (interleaving) on as much memory as it can, and the GPU |
* will additionally sometimes enable different bit 6 |
* swizzling for tiled objects from the CPU. |
* |
* Here's what I found on the G965: |
*   slot fill          memory size   swizzling |
*   0A   0B   1A   1B  1-ch   2-ch |
*   512  0    0    0   512    0       O |
*   512  0    512  0   16     1008    X |
*   512  0    0    512 16     1008    X |
*   0    512  0    512 16     1008    X |
*   1024 1024 1024 0   2048   1024    O |
* |
* We could probably detect this based on either the DRB |
* matching, which was the case for the swizzling required in |
* the table above, or from the 1-ch value being less than |
* the minimum size of a rank. |
*/ |
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { |
swizzle_x = I915_BIT_6_SWIZZLE_NONE; |
swizzle_y = I915_BIT_6_SWIZZLE_NONE; |
} else { |
swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
swizzle_y = I915_BIT_6_SWIZZLE_9; |
} |
} |
dev_priv->mm.bit_6_swizzle_x = swizzle_x; |
dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
} |
#if 0 |
/* Check pitch constraints for all chips & tiling formats */ |
static bool |
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
{ |
int tile_width; |
/* Linear is always fine */ |
if (tiling_mode == I915_TILING_NONE) |
return true; |
if (IS_GEN2(dev) || |
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) |
tile_width = 128; |
else |
tile_width = 512; |
/* check maximum stride & object size */ |
if (INTEL_INFO(dev)->gen >= 4) { |
/* i965 stores the end address of the gtt mapping in the fence |
* reg, so don't bother to check the size */ |
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) |
return false; |
} else { |
if (stride > 8192) |
return false; |
if (IS_GEN3(dev)) { |
if (size > I830_FENCE_MAX_SIZE_VAL << 20) |
return false; |
} else { |
if (size > I830_FENCE_MAX_SIZE_VAL << 19) |
return false; |
} |
} |
/* 965+ just needs multiples of tile width */ |
if (INTEL_INFO(dev)->gen >= 4) { |
if (stride & (tile_width - 1)) |
return false; |
return true; |
} |
/* Pre-965 needs power of two tile widths */ |
if (stride < tile_width) |
return false; |
if (stride & (stride - 1)) |
return false; |
return true; |
} |
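/* Worked examples of the pre-965 rules above, with tile_width = 512 |
 * (X tiling, non-gen2): a stride of 512 or 1024 passes, 768 fails |
 * the power-of-two check, and 256 fails the stride < tile_width |
 * check. |
 */ |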
/* Is the current GTT allocation valid for the change in tiling? */ |
static bool |
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) |
{ |
u32 size; |
if (tiling_mode == I915_TILING_NONE) |
return true; |
if (INTEL_INFO(obj->base.dev)->gen >= 4) |
return true; |
if (INTEL_INFO(obj->base.dev)->gen == 3) { |
if (obj->gtt_offset & ~I915_FENCE_START_MASK) |
return false; |
} else { |
if (obj->gtt_offset & ~I830_FENCE_START_MASK) |
return false; |
} |
/* |
* Previous chips need to be aligned to the size of the smallest |
* fence register that can contain the object. |
*/ |
if (INTEL_INFO(obj->base.dev)->gen == 3) |
size = 1024*1024; |
else |
size = 512*1024; |
while (size < obj->base.size) |
size <<= 1; |
if (obj->gtt_space->size != size) |
return false; |
if (obj->gtt_offset & (size - 1)) |
return false; |
return true; |
} |
/** |
* Sets the tiling mode of an object, returning the required swizzling of |
* bit 6 of addresses in the object. |
*/ |
int |
i915_gem_set_tiling(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_set_tiling *args = data; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_gem_object *obj; |
int ret = 0; |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) |
return -ENOENT; |
if (!i915_tiling_ok(dev, |
args->stride, obj->base.size, args->tiling_mode)) { |
drm_gem_object_unreference_unlocked(&obj->base); |
return -EINVAL; |
} |
if (obj->pin_count) { |
drm_gem_object_unreference_unlocked(&obj->base); |
return -EBUSY; |
} |
if (args->tiling_mode == I915_TILING_NONE) { |
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
args->stride = 0; |
} else { |
if (args->tiling_mode == I915_TILING_X) |
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
else |
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
/* Hide bit 17 swizzling from the user. This prevents old Mesa |
* from aborting the application on sw fallbacks to bit 17, |
* and we use the pread/pwrite bit17 paths to swizzle for it. |
* If there was a user that was relying on the swizzle |
* information for drm_intel_bo_map()ed reads/writes this would |
* break it, but we don't have any of those. |
*/ |
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) |
args->swizzle_mode = I915_BIT_6_SWIZZLE_9; |
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
/* If we can't handle the swizzling, make it untiled. */ |
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { |
args->tiling_mode = I915_TILING_NONE; |
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
args->stride = 0; |
} |
} |
mutex_lock(&dev->struct_mutex); |
if (args->tiling_mode != obj->tiling_mode || |
args->stride != obj->stride) { |
/* We need to rebind the object if its current allocation |
* no longer meets the alignment restrictions for its new |
* tiling mode. Otherwise we can just leave it alone, but |
* need to ensure that any fence register is cleared. |
*/ |
i915_gem_release_mmap(obj); |
obj->map_and_fenceable = |
obj->gtt_space == NULL || |
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
i915_gem_object_fence_ok(obj, args->tiling_mode)); |
/* Rebind if we need a change of alignment */ |
if (!obj->map_and_fenceable) { |
u32 unfenced_alignment = |
i915_gem_get_unfenced_gtt_alignment(dev, |
obj->base.size, |
args->tiling_mode); |
if (obj->gtt_offset & (unfenced_alignment - 1)) |
ret = i915_gem_object_unbind(obj); |
} |
if (ret == 0) { |
obj->tiling_changed = true; |
obj->tiling_mode = args->tiling_mode; |
obj->stride = args->stride; |
} |
} |
/* we have to maintain this existing ABI... */ |
args->stride = obj->stride; |
args->tiling_mode = obj->tiling_mode; |
drm_gem_object_unreference(&obj->base); |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
/** |
* Returns the current tiling mode and required bit 6 swizzling for the object. |
*/ |
int |
i915_gem_get_tiling(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_get_tiling *args = data; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_gem_object *obj; |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) |
return -ENOENT; |
mutex_lock(&dev->struct_mutex); |
args->tiling_mode = obj->tiling_mode; |
switch (obj->tiling_mode) { |
case I915_TILING_X: |
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; |
break; |
case I915_TILING_Y: |
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; |
break; |
case I915_TILING_NONE: |
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; |
break; |
default: |
DRM_ERROR("unknown tiling mode\n"); |
} |
/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ |
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) |
args->swizzle_mode = I915_BIT_6_SWIZZLE_9; |
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) |
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; |
drm_gem_object_unreference(&obj->base); |
mutex_unlock(&dev->struct_mutex); |
return 0; |
} |
/** |
* Swap every 64 bytes of this page around, to account for it having a new |
* bit 17 of its physical address and therefore being interpreted differently |
* by the GPU. |
*/ |
static void |
i915_gem_swizzle_page(struct page *page) |
{ |
char temp[64]; |
char *vaddr; |
int i; |
vaddr = kmap(page); |
for (i = 0; i < PAGE_SIZE; i += 128) { |
memcpy(temp, &vaddr[i], 64); |
memcpy(&vaddr[i], &vaddr[i + 64], 64); |
memcpy(&vaddr[i + 64], temp, 64); |
} |
kunmap(page); |
} |
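/* Effect of the loop above on one 4 KiB page: bytes [0,63] swap with |
 * [64,127], bytes [128,191] with [192,255], and so on (32 swapped |
 * pairs), matching a flipped bit 6 in the physical address. |
 */ |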
void |
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
int page_count = obj->base.size >> PAGE_SHIFT; |
int i; |
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
return; |
if (obj->bit_17 == NULL) |
return; |
for (i = 0; i < page_count; i++) { |
char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; |
if ((new_bit_17 & 0x1) != |
(test_bit(i, obj->bit_17) != 0)) { |
i915_gem_swizzle_page(obj->pages[i]); |
set_page_dirty(obj->pages[i]); |
} |
} |
} |
void |
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
int page_count = obj->base.size >> PAGE_SHIFT; |
int i; |
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) |
return; |
if (obj->bit_17 == NULL) { |
obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
sizeof(long), GFP_KERNEL); |
if (obj->bit_17 == NULL) { |
DRM_ERROR("Failed to allocate memory for bit 17 " |
"record\n"); |
return; |
} |
} |
for (i = 0; i < page_count; i++) { |
if (page_to_phys(obj->pages[i]) & (1 << 17)) |
__set_bit(i, obj->bit_17); |
else |
__clear_bit(i, obj->bit_17); |
} |
} |
#endif |
/drivers/video/drm/i915/intel_drv.h |
---|
0,0 → 1,383 |
/* |
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef __INTEL_DRV_H__ |
#define __INTEL_DRV_H__ |
#include <linux/i2c.h> |
#include "i915_drv.h" |
#include "drm_crtc.h" |
#include "drm_crtc_helper.h" |
#include "drm_fb_helper.h" |
#define _wait_for(COND, MS, W) ({ \ |
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ |
int ret__ = 0; \ |
while (! (COND)) { \ |
if (time_after(jiffies, timeout__)) { \ |
ret__ = -ETIMEDOUT; \ |
break; \ |
} \ |
if (W && !(/*in_atomic()||*/ in_dbg_master())) msleep(W); \ |
} \ |
ret__; \ |
}) |
#define wait_for(COND, MS) _wait_for(COND, MS, 1) |
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) |
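/* A minimal usage sketch of wait_for(), assuming a hypothetical |
 * status register whose bit 31 reads as 1 once the unit is ready: |
 * poll for up to 50 ms, sleeping 1 ms between reads, returning 0 |
 * on success or -ETIMEDOUT. |
 */ |
static inline int wait_for_unit_ready(struct drm_i915_private *dev_priv, |
				      u32 reg) |
{ |
	return wait_for(I915_READ(reg) & (1 << 31), 50); |
} |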
#define MSLEEP(x) do { \ |
if (in_dbg_master()) \ |
mdelay(x); \ |
else \ |
msleep(x); \ |
} while(0) |
#define KHz(x) (1000*x) |
#define MHz(x) KHz(1000*x) |
/* |
* Display related stuff |
*/ |
/* store information about an Ixxx DVO */ |
/* The i830->i865 use multiple DVOs with multiple i2cs */ |
/* the i915, i945 have a single sDVO i2c bus - which is different */ |
#define MAX_OUTPUTS 6 |
/* maximum connectors per crtcs in the mode set */ |
#define INTELFB_CONN_LIMIT 4 |
#define INTEL_I2C_BUS_DVO 1 |
#define INTEL_I2C_BUS_SDVO 2 |
/* These are outputs from the chip (integrated only); |
external chips are attached via DVO or SDVO output */ |
#define INTEL_OUTPUT_UNUSED 0 |
#define INTEL_OUTPUT_ANALOG 1 |
#define INTEL_OUTPUT_DVO 2 |
#define INTEL_OUTPUT_SDVO 3 |
#define INTEL_OUTPUT_LVDS 4 |
#define INTEL_OUTPUT_TVOUT 5 |
#define INTEL_OUTPUT_HDMI 6 |
#define INTEL_OUTPUT_DISPLAYPORT 7 |
#define INTEL_OUTPUT_EDP 8 |
/* Intel Pipe Clone Bit */ |
#define INTEL_HDMIB_CLONE_BIT 1 |
#define INTEL_HDMIC_CLONE_BIT 2 |
#define INTEL_HDMID_CLONE_BIT 3 |
#define INTEL_HDMIE_CLONE_BIT 4 |
#define INTEL_HDMIF_CLONE_BIT 5 |
#define INTEL_SDVO_NON_TV_CLONE_BIT 6 |
#define INTEL_SDVO_TV_CLONE_BIT 7 |
#define INTEL_SDVO_LVDS_CLONE_BIT 8 |
#define INTEL_ANALOG_CLONE_BIT 9 |
#define INTEL_TV_CLONE_BIT 10 |
#define INTEL_DP_B_CLONE_BIT 11 |
#define INTEL_DP_C_CLONE_BIT 12 |
#define INTEL_DP_D_CLONE_BIT 13 |
#define INTEL_LVDS_CLONE_BIT 14 |
#define INTEL_DVO_TMDS_CLONE_BIT 15 |
#define INTEL_DVO_LVDS_CLONE_BIT 16 |
#define INTEL_EDP_CLONE_BIT 17 |
#define INTEL_DVO_CHIP_NONE 0 |
#define INTEL_DVO_CHIP_LVDS 1 |
#define INTEL_DVO_CHIP_TMDS 2 |
#define INTEL_DVO_CHIP_TVOUT 4 |
/* drm_display_mode->private_flags */ |
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0) |
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT) |
static inline void |
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, |
int multiplier) |
{ |
mode->clock *= multiplier; |
mode->private_flags |= multiplier; |
} |
static inline int |
intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) |
{ |
return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT; |
} |
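/* Round-trip sketch of the two helpers above: with multiplier = 2, |
 * mode->clock doubles and private_flags records 2 (the shift is 0), |
 * so intel_mode_get_pixel_multiplier() returns 2 for that mode. |
 */ |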
struct intel_framebuffer { |
struct drm_framebuffer base; |
struct drm_i915_gem_object *obj; |
}; |
struct intel_fbdev { |
struct drm_fb_helper helper; |
struct intel_framebuffer ifb; |
struct list_head fbdev_list; |
struct drm_display_mode *our_mode; |
}; |
struct intel_encoder { |
struct drm_encoder base; |
int type; |
bool needs_tv_clock; |
void (*hot_plug)(struct intel_encoder *); |
int crtc_mask; |
int clone_mask; |
}; |
struct intel_connector { |
struct drm_connector base; |
struct intel_encoder *encoder; |
}; |
struct intel_crtc { |
struct drm_crtc base; |
enum pipe pipe; |
enum plane plane; |
u8 lut_r[256], lut_g[256], lut_b[256]; |
int dpms_mode; |
bool active; /* is the crtc on? independent of the dpms mode */ |
bool busy; /* is scanout buffer being updated frequently? */ |
struct timer_list idle_timer; |
bool lowfreq_avail; |
struct intel_overlay *overlay; |
struct intel_unpin_work *unpin_work; |
int fdi_lanes; |
struct drm_i915_gem_object *cursor_bo; |
uint32_t cursor_addr; |
int16_t cursor_x, cursor_y; |
int16_t cursor_width, cursor_height; |
bool cursor_visible; |
unsigned int bpp; |
}; |
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
#define to_intel_connector(x) container_of(x, struct intel_connector, base) |
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) |
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) |
#define DIP_HEADER_SIZE 5 |
#define DIP_TYPE_AVI 0x82 |
#define DIP_VERSION_AVI 0x2 |
#define DIP_LEN_AVI 13 |
#define DIP_TYPE_SPD 0x3 |
#define DIP_VERSION_SPD 0x1 |
#define DIP_LEN_SPD 25 |
#define DIP_SPD_UNKNOWN 0 |
#define DIP_SPD_DSTB 0x1 |
#define DIP_SPD_DVDP 0x2 |
#define DIP_SPD_DVHS 0x3 |
#define DIP_SPD_HDDVR 0x4 |
#define DIP_SPD_DVC 0x5 |
#define DIP_SPD_DSC 0x6 |
#define DIP_SPD_VCD 0x7 |
#define DIP_SPD_GAME 0x8 |
#define DIP_SPD_PC 0x9 |
#define DIP_SPD_BD 0xa |
#define DIP_SPD_SCD 0xb |
struct dip_infoframe { |
uint8_t type; /* HB0 */ |
uint8_t ver; /* HB1 */ |
uint8_t len; /* HB2 - body len, not including checksum */ |
uint8_t ecc; /* Header ECC */ |
uint8_t checksum; /* PB0 */ |
union { |
struct { |
/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ |
uint8_t Y_A_B_S; |
/* PB2 - C 7:6, M 5:4, R 3:0 */ |
uint8_t C_M_R; |
/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ |
uint8_t ITC_EC_Q_SC; |
/* PB4 - VIC 6:0 */ |
uint8_t VIC; |
/* PB5 - PR 3:0 */ |
uint8_t PR; |
/* PB6 to PB13 */ |
uint16_t top_bar_end; |
uint16_t bottom_bar_start; |
uint16_t left_bar_end; |
uint16_t right_bar_start; |
} avi; |
struct { |
uint8_t vn[8]; |
uint8_t pd[16]; |
uint8_t sdi; |
} spd; |
uint8_t payload[27]; |
} __attribute__ ((packed)) body; |
} __attribute__((packed)); |
static inline struct drm_crtc * |
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
return dev_priv->pipe_to_crtc_mapping[pipe]; |
} |
static inline struct drm_crtc * |
intel_get_crtc_for_plane(struct drm_device *dev, int plane) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
return dev_priv->plane_to_crtc_mapping[plane]; |
} |
struct intel_unpin_work { |
// struct work_struct work; |
struct drm_device *dev; |
struct drm_i915_gem_object *old_fb_obj; |
struct drm_i915_gem_object *pending_flip_obj; |
struct drm_pending_vblank_event *event; |
int pending; |
bool enable_stall_check; |
}; |
struct intel_fbc_work { |
// struct delayed_work work; |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
int interval; |
}; |
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); |
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); |
extern void intel_attach_force_audio_property(struct drm_connector *connector); |
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); |
extern void intel_crt_init(struct drm_device *dev); |
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); |
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); |
extern bool intel_sdvo_init(struct drm_device *dev, int output_device); |
extern void intel_dvo_init(struct drm_device *dev); |
extern void intel_tv_init(struct drm_device *dev); |
extern void intel_mark_busy(struct drm_device *dev, |
struct drm_i915_gem_object *obj); |
extern bool intel_lvds_init(struct drm_device *dev); |
extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
void |
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
extern bool intel_dpd_is_edp(struct drm_device *dev); |
extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); |
/* intel_panel.c */ |
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
struct drm_display_mode *adjusted_mode); |
extern void intel_pch_panel_fitting(struct drm_device *dev, |
int fitting_mode, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
extern u32 intel_panel_get_max_backlight(struct drm_device *dev); |
extern u32 intel_panel_get_backlight(struct drm_device *dev); |
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); |
extern int intel_panel_setup_backlight(struct drm_device *dev); |
extern void intel_panel_enable_backlight(struct drm_device *dev); |
extern void intel_panel_disable_backlight(struct drm_device *dev); |
extern void intel_panel_destroy_backlight(struct drm_device *dev); |
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); |
extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
extern void intel_encoder_prepare (struct drm_encoder *encoder); |
extern void intel_encoder_commit (struct drm_encoder *encoder); |
extern void intel_encoder_destroy(struct drm_encoder *encoder); |
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) |
{ |
return to_intel_connector(connector)->encoder; |
} |
extern void intel_connector_attach_encoder(struct intel_connector *connector, |
struct intel_encoder *encoder); |
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, |
struct drm_crtc *crtc); |
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); |
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); |
struct intel_load_detect_pipe { |
struct drm_framebuffer *release_fb; |
bool load_detect_temp; |
int dpms_mode; |
}; |
extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
struct drm_connector *connector, |
struct drm_display_mode *mode, |
struct intel_load_detect_pipe *old); |
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
struct drm_connector *connector, |
struct intel_load_detect_pipe *old); |
extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); |
extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); |
extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); |
extern void intelfb_restore(void); |
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
u16 blue, int regno); |
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
u16 *blue, int regno); |
extern void intel_enable_clock_gating(struct drm_device *dev); |
extern void ironlake_enable_drps(struct drm_device *dev); |
extern void ironlake_disable_drps(struct drm_device *dev); |
extern void gen6_enable_rps(struct drm_i915_private *dev_priv); |
extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv); |
extern void gen6_disable_rps(struct drm_device *dev); |
extern void intel_init_emon(struct drm_device *dev); |
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, |
struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *pipelined); |
extern int intel_framebuffer_init(struct drm_device *dev, |
struct intel_framebuffer *ifb, |
struct drm_mode_fb_cmd *mode_cmd, |
struct drm_i915_gem_object *obj); |
extern int intel_fbdev_init(struct drm_device *dev); |
extern void intel_fbdev_fini(struct drm_device *dev); |
extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); |
extern void intel_setup_overlay(struct drm_device *dev); |
extern void intel_cleanup_overlay(struct drm_device *dev); |
extern int intel_overlay_switch_off(struct intel_overlay *overlay); |
extern int intel_overlay_put_image(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int intel_overlay_attrs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern void intel_fb_output_poll_changed(struct drm_device *dev); |
extern void intel_fb_restore_mode(struct drm_device *dev); |
extern void intel_init_clock_gating(struct drm_device *dev); |
#endif /* __INTEL_DRV_H__ */ |
/drivers/video/drm/i915/intel_i2c.c |
---|
0,0 → 1,601 |
/* |
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie> |
* Copyright © 2006-2008,2010 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Eric Anholt <eric@anholt.net> |
* Chris Wilson <chris@chris-wilson.co.uk> |
*/ |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include "drmP.h" |
#include "drm.h" |
#include "intel_drv.h" |
//#include "i915_drm.h" |
#include "i915_drv.h" |
#include <syscall.h> |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000L |
#define HZ_TO_MSEC_MUL32 0xA0000000 |
#define HZ_TO_MSEC_ADJ32 0x0 |
#define HZ_TO_MSEC_SHR32 28 |
#define HZ_TO_MSEC_MUL64 0xA000000000000000 |
#define HZ_TO_MSEC_ADJ64 0x0 |
#define HZ_TO_MSEC_SHR64 60 |
#define MSEC_TO_HZ_MUL32 0xCCCCCCCD |
#define MSEC_TO_HZ_ADJ32 0x733333333 |
#define MSEC_TO_HZ_SHR32 35 |
#define MSEC_TO_HZ_MUL64 0xCCCCCCCCCCCCCCCD |
#define MSEC_TO_HZ_ADJ64 0x73333333333333333 |
#define MSEC_TO_HZ_SHR64 67 |
#define HZ_TO_MSEC_NUM 10 |
#define HZ_TO_MSEC_DEN 1 |
#define MSEC_TO_HZ_NUM 1 |
#define MSEC_TO_HZ_DEN 10 |
#define HZ_TO_USEC_MUL32 0x9C400000 |
#define HZ_TO_USEC_ADJ32 0x0 |
#define HZ_TO_USEC_SHR32 18 |
#define HZ_TO_USEC_MUL64 0x9C40000000000000 |
#define HZ_TO_USEC_ADJ64 0x0 |
#define HZ_TO_USEC_SHR64 50 |
#define USEC_TO_HZ_MUL32 0xD1B71759 |
#define USEC_TO_HZ_ADJ32 0x1FFF2E48E8A7 |
#define USEC_TO_HZ_SHR32 45 |
#define USEC_TO_HZ_MUL64 0xD1B71758E219652C |
#define USEC_TO_HZ_ADJ64 0x1FFF2E48E8A71DE69AD4 |
#define USEC_TO_HZ_SHR64 77 |
#define HZ_TO_USEC_NUM 10000 |
#define HZ_TO_USEC_DEN 1 |
#define USEC_TO_HZ_NUM 1 |
#define USEC_TO_HZ_DEN 10000 |
inline unsigned int jiffies_to_usecs(const unsigned long j) |
{ |
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
return (USEC_PER_SEC / HZ) * j; |
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); |
#else |
# if BITS_PER_LONG == 32 |
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32; |
# else |
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN; |
# endif |
#endif |
} |
/* |
* When we convert to jiffies then we interpret incoming values |
* the following way: |
* |
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET) |
* |
* - 'too large' values [that would result in larger than |
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too. |
* |
* - all other values are converted to jiffies by either multiplying |
* the input value by a factor or dividing it with a factor |
* |
* We must also be careful about 32-bit overflows. |
*/ |
unsigned long msecs_to_jiffies(const unsigned int m) |
{ |
/* |
* Negative value, means infinite timeout: |
*/ |
if ((int)m < 0) |
return MAX_JIFFY_OFFSET; |
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) |
/* |
* HZ is equal to or smaller than 1000, and 1000 is a nice |
* round multiple of HZ, divide with the factor between them, |
* but round upwards: |
*/ |
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); |
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) |
/* |
* HZ is larger than 1000, and HZ is a nice round multiple of |
* 1000 - simply multiply with the factor between them. |
* |
* But first make sure the multiplication result cannot |
* overflow: |
*/ |
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) |
return MAX_JIFFY_OFFSET; |
return m * (HZ / MSEC_PER_SEC); |
#else |
/* |
* Generic case - multiply, round and divide. But first |
* check that if we are doing a net multiplication, that |
* we wouldn't overflow: |
*/ |
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) |
return MAX_JIFFY_OFFSET; |
return (MSEC_TO_HZ_MUL32 * (u64)m + MSEC_TO_HZ_ADJ32) |
>> MSEC_TO_HZ_SHR32; |
#endif |
} |
unsigned long usecs_to_jiffies(const unsigned int u) |
{ |
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET)) |
return MAX_JIFFY_OFFSET; |
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) |
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ); |
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) |
return u * (HZ / USEC_PER_SEC); |
#else |
return (USEC_TO_HZ_MUL32 * (u64)u + USEC_TO_HZ_ADJ32) |
>> USEC_TO_HZ_SHR32; |
#endif |
} |
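/* Worked example (assuming HZ = 100; this file does not fix HZ itself): |
 * |
 *	msecs_to_jiffies(1)    -> (1 + 10 - 1) / 10     = 1 jiffy (rounds up) |
 *	msecs_to_jiffies(25)   -> (25 + 10 - 1) / 10    = 3 jiffies |
 *	usecs_to_jiffies(2200) -> (2200 + 9999) / 10000 = 1 jiffy |
 *	jiffies_to_usecs(3)    -> (1000000 / 100) * 3   = 30000 us |
 * |
 * The usecs_to_jiffies(2200) value is exactly the bit-banging timeout |
 * programmed in intel_gpio_create() below. |
 */ |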
/* Intel GPIO access functions */ |
#define I2C_RISEFALL_TIME 20 |
static inline struct intel_gmbus * |
to_intel_gmbus(struct i2c_adapter *i2c) |
{ |
return container_of(i2c, struct intel_gmbus, adapter); |
} |
struct intel_gpio { |
struct i2c_adapter adapter; |
struct i2c_algo_bit_data algo; |
struct drm_i915_private *dev_priv; |
u32 reg; |
}; |
void |
intel_i2c_reset(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (HAS_PCH_SPLIT(dev)) |
I915_WRITE(PCH_GMBUS0, 0); |
else |
I915_WRITE(GMBUS0, 0); |
} |
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) |
{ |
u32 val; |
/* When using bit-banging for I2C, DPCUNIT clock gating must be disabled (this bit set to 1) */ |
if (!IS_PINEVIEW(dev_priv->dev)) |
return; |
val = I915_READ(DSPCLK_GATE_D); |
if (enable) |
val |= DPCUNIT_CLOCK_GATE_DISABLE; |
else |
val &= ~DPCUNIT_CLOCK_GATE_DISABLE; |
I915_WRITE(DSPCLK_GATE_D, val); |
} |
static u32 get_reserved(struct intel_gpio *gpio) |
{ |
struct drm_i915_private *dev_priv = gpio->dev_priv; |
struct drm_device *dev = dev_priv->dev; |
u32 reserved = 0; |
/* On most chips, these bits must be preserved in software. */ |
if (!IS_I830(dev) && !IS_845G(dev)) |
reserved = I915_READ_NOTRACE(gpio->reg) & |
(GPIO_DATA_PULLUP_DISABLE | |
GPIO_CLOCK_PULLUP_DISABLE); |
return reserved; |
} |
static int get_clock(void *data) |
{ |
struct intel_gpio *gpio = data; |
struct drm_i915_private *dev_priv = gpio->dev_priv; |
u32 reserved = get_reserved(gpio); |
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); |
I915_WRITE_NOTRACE(gpio->reg, reserved); |
return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; |
} |
static int get_data(void *data) |
{ |
struct intel_gpio *gpio = data; |
struct drm_i915_private *dev_priv = gpio->dev_priv; |
u32 reserved = get_reserved(gpio); |
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); |
I915_WRITE_NOTRACE(gpio->reg, reserved); |
return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; |
} |
static void set_clock(void *data, int state_high) |
{ |
struct intel_gpio *gpio = data; |
struct drm_i915_private *dev_priv = gpio->dev_priv; |
u32 reserved = get_reserved(gpio); |
u32 clock_bits; |
if (state_high) |
clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; |
else |
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | |
GPIO_CLOCK_VAL_MASK; |
I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); |
POSTING_READ(gpio->reg); |
} |
static void set_data(void *data, int state_high) |
{ |
struct intel_gpio *gpio = data; |
struct drm_i915_private *dev_priv = gpio->dev_priv; |
u32 reserved = get_reserved(gpio); |
u32 data_bits; |
if (state_high) |
data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; |
else |
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | |
GPIO_DATA_VAL_MASK; |
I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); |
POSTING_READ(gpio->reg); |
} |
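/* Minimal sketch (not part of the original driver) of how the |
 * i2c-algo-bit layer drives the four callbacks above. The lines are |
 * open-drain: "high" releases the pin to the pull-up (direction in), |
 * "low" actively drives it. A START condition is SDA falling while SCL |
 * stays high: |
 */ |
static inline void intel_gpio_start_example(struct intel_gpio *gpio) |
{ |
	set_data(gpio, 1);		/* release SDA */ |
	set_clock(gpio, 1);		/* release SCL */ |
	udelay(I2C_RISEFALL_TIME); |
	set_data(gpio, 0);		/* SDA low while SCL high: START */ |
	udelay(I2C_RISEFALL_TIME); |
	set_clock(gpio, 0);		/* claim the bus */ |
} |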
static struct i2c_adapter * |
intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) |
{ |
static const int map_pin_to_reg[] = { |
0, |
GPIOB, |
GPIOA, |
GPIOC, |
GPIOD, |
GPIOE, |
0, |
GPIOF, |
}; |
struct intel_gpio *gpio; |
ENTER(); |
if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) |
return NULL; |
gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); |
if (gpio == NULL) |
return NULL; |
gpio->reg = map_pin_to_reg[pin]; |
if (HAS_PCH_SPLIT(dev_priv->dev)) |
gpio->reg += PCH_GPIOA - GPIOA; |
gpio->dev_priv = dev_priv; |
snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), |
"i915 GPIO%c", "?BACDE?F"[pin]); |
// gpio->adapter.owner = THIS_MODULE; |
gpio->adapter.algo_data = &gpio->algo; |
gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; |
gpio->algo.setsda = set_data; |
gpio->algo.setscl = set_clock; |
gpio->algo.getsda = get_data; |
gpio->algo.getscl = get_clock; |
gpio->algo.udelay = I2C_RISEFALL_TIME; |
gpio->algo.timeout = usecs_to_jiffies(2200); |
gpio->algo.data = gpio; |
if (i2c_bit_add_bus(&gpio->adapter)) |
goto out_free; |
LEAVE(); |
return &gpio->adapter; |
out_free: |
kfree(gpio); |
return NULL; |
} |
static int |
intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv, |
struct i2c_adapter *adapter, |
struct i2c_msg *msgs, |
int num) |
{ |
struct intel_gpio *gpio = container_of(adapter, |
struct intel_gpio, |
adapter); |
int ret; |
intel_i2c_reset(dev_priv->dev); |
intel_i2c_quirk_set(dev_priv, true); |
set_data(gpio, 1); |
set_clock(gpio, 1); |
udelay(I2C_RISEFALL_TIME); |
ret = adapter->algo->master_xfer(adapter, msgs, num); |
set_data(gpio, 1); |
set_clock(gpio, 1); |
intel_i2c_quirk_set(dev_priv, false); |
return ret; |
} |
static int |
gmbus_xfer(struct i2c_adapter *adapter, |
struct i2c_msg *msgs, |
int num) |
{ |
struct intel_gmbus *bus = container_of(adapter, |
struct intel_gmbus, |
adapter); |
struct drm_i915_private *dev_priv = adapter->algo_data; |
int i, reg_offset; |
if (bus->force_bit) |
return intel_i2c_quirk_xfer(dev_priv, |
bus->force_bit, msgs, num); |
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0; |
I915_WRITE(GMBUS0 + reg_offset, bus->reg0); |
for (i = 0; i < num; i++) { |
u16 len = msgs[i].len; |
u8 *buf = msgs[i].buf; |
if (msgs[i].flags & I2C_M_RD) { |
I915_WRITE(GMBUS1 + reg_offset, |
GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) | |
(len << GMBUS_BYTE_COUNT_SHIFT) | |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | |
GMBUS_SLAVE_READ | GMBUS_SW_RDY); |
POSTING_READ(GMBUS2+reg_offset); |
do { |
u32 val, loop = 0; |
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) |
goto timeout; |
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) |
goto clear_err; |
val = I915_READ(GMBUS3 + reg_offset); |
do { |
*buf++ = val & 0xff; |
val >>= 8; |
} while (--len && ++loop < 4); |
} while (len); |
} else { |
u32 val, loop; |
val = loop = 0; |
do { |
val |= *buf++ << (8 * loop); |
} while (--len && ++loop < 4); |
I915_WRITE(GMBUS3 + reg_offset, val); |
I915_WRITE(GMBUS1 + reg_offset, |
(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); |
POSTING_READ(GMBUS2+reg_offset); |
while (len) { |
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) |
goto timeout; |
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) |
goto clear_err; |
val = loop = 0; |
do { |
val |= *buf++ << (8 * loop); |
} while (--len && ++loop < 4); |
I915_WRITE(GMBUS3 + reg_offset, val); |
POSTING_READ(GMBUS2+reg_offset); |
} |
} |
if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) |
goto timeout; |
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) |
goto clear_err; |
} |
goto done; |
clear_err: |
/* Toggle the Software Clear Interrupt bit. This has the effect |
* of resetting the GMBUS controller and so clearing the |
* BUS_ERROR raised by the slave's NAK. |
*/ |
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); |
I915_WRITE(GMBUS1 + reg_offset, 0); |
done: |
/* Mark the GMBUS interface as disabled. We will re-enable it at the |
* start of the next xfer, till then let it sleep. |
*/ |
I915_WRITE(GMBUS0 + reg_offset, 0); |
return i; |
timeout: |
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", |
bus->reg0 & 0xff, bus->adapter.name); |
I915_WRITE(GMBUS0 + reg_offset, 0); |
/* The hardware may not support GMBUS over these pins; fall back to GPIO bit-banging instead. */ |
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); |
if (!bus->force_bit) |
return -ENOMEM; |
return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); |
} |
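/* Data moves through GMBUS3 four bytes at a time, little-endian. As an |
 * illustration (not code from the driver), a 6-byte write of |
 * {0x11, 0x22, 0x33, 0x44, 0x55, 0x66} makes the packing loops above |
 * emit two register writes, 0x44332211 then 0x00006655; reads unpack |
 * in the same byte order. |
 */ |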
static u32 gmbus_func(struct i2c_adapter *adapter) |
{ |
struct intel_gmbus *bus = container_of(adapter, |
struct intel_gmbus, |
adapter); |
/* Poke the bit-banging fallback if present; its result is not |
 * merged in, and GMBUS capabilities are reported either way. */ |
if (bus->force_bit) |
bus->force_bit->algo->functionality(bus->force_bit); |
return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | |
/* I2C_FUNC_10BIT_ADDR | */ |
I2C_FUNC_SMBUS_READ_BLOCK_DATA | |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL); |
} |
static const struct i2c_algorithm gmbus_algorithm = { |
.master_xfer = gmbus_xfer, |
.functionality = gmbus_func |
}; |
/** |
 * intel_setup_gmbus - instantiate all Intel i2c GMBuses |
 * @dev: DRM device |
 */ |
int intel_setup_gmbus(struct drm_device *dev) |
{ |
static const char *names[GMBUS_NUM_PORTS] = { |
"disabled", |
"ssc", |
"vga", |
"panel", |
"dpc", |
"dpb", |
"reserved", |
"dpd", |
}; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret = 0, i;	/* ret is only set by the (currently disabled) adapter registration */ |
ENTER(); |
dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), |
GFP_KERNEL); |
if (dev_priv->gmbus == NULL) |
return -ENOMEM; |
for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
// bus->adapter.owner = THIS_MODULE; |
bus->adapter.class = I2C_CLASS_DDC; |
snprintf(bus->adapter.name, |
sizeof(bus->adapter.name), |
"i915 gmbus %s", |
names[i]); |
bus->adapter.dev.parent = &dev->pdev->dev; |
bus->adapter.algo_data = dev_priv; |
bus->adapter.algo = &gmbus_algorithm; |
// ret = i2c_add_adapter(&bus->adapter); |
// if (ret) |
// goto err; |
/* By default use a conservative clock rate */ |
bus->reg0 = i | GMBUS_RATE_100KHZ; |
/* XXX force bit banging until GMBUS is fully debugged */ |
bus->force_bit = intel_gpio_create(dev_priv, i); |
} |
intel_i2c_reset(dev_priv->dev); |
LEAVE(); |
return 0; |
err:	/* unreachable until the i2c_add_adapter() call above is re-enabled */ |
// while (--i) { |
// struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
// i2c_del_adapter(&bus->adapter); |
// } |
kfree(dev_priv->gmbus); |
dev_priv->gmbus = NULL; |
return ret; |
} |
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) |
{ |
struct intel_gmbus *bus = to_intel_gmbus(adapter); |
/* speed: |
 * 0x0 = 100 KHz |
 * 0x1 = 50 KHz |
 * 0x2 = 400 KHz |
 * 0x3 = 1000 KHz |
 */ |
bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); |
} |
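/* Usage sketch, assuming callers pass the raw 0-3 rate code listed in |
 * the comment above (which this port then shifts into bits 9:8): |
 * |
 *	intel_gmbus_set_speed(adapter, 0x2);	// select 400 KHz |
 */ |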
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) |
{ |
struct intel_gmbus *bus = to_intel_gmbus(adapter); |
if (force_bit) { |
if (bus->force_bit == NULL) { |
struct drm_i915_private *dev_priv = adapter->algo_data; |
bus->force_bit = intel_gpio_create(dev_priv, |
bus->reg0 & 0xff); |
} |
} else { |
if (bus->force_bit) { |
// i2c_del_adapter(bus->force_bit); |
kfree(bus->force_bit); |
bus->force_bit = NULL; |
} |
} |
} |
void intel_teardown_gmbus(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int i; |
if (dev_priv->gmbus == NULL) |
return; |
for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
if (bus->force_bit) { |
// i2c_del_adapter(bus->force_bit); |
kfree(bus->force_bit); |
} |
// i2c_del_adapter(&bus->adapter); |
} |
kfree(dev_priv->gmbus); |
dev_priv->gmbus = NULL; |
} |
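/* Lifecycle sketch (an assumption mirroring the upstream driver, not |
 * code from this port): the GMBUS adapters live for the whole driver |
 * session. |
 * |
 *	if (intel_setup_gmbus(dev))		// at load time |
 *		return -ENOMEM; |
 *	... |
 *	intel_teardown_gmbus(dev);		// at unload time |
 */ |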
/drivers/video/drm/i915/intel_ringbuffer.h |
---|
0,0 → 1,203 |
#ifndef _INTEL_RINGBUFFER_H_ |
#define _INTEL_RINGBUFFER_H_ |
enum { |
RCS = 0x0, |
VCS, |
BCS, |
I915_NUM_RINGS, |
}; |
struct intel_hw_status_page { |
u32 __iomem *page_addr; |
unsigned int gfx_addr; |
struct drm_i915_gem_object *obj; |
}; |
#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base)) |
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) |
#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base)) |
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) |
#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base)) |
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) |
#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base)) |
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) |
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) |
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) |
#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) |
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) |
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) |
struct intel_ring_buffer { |
const char *name; |
enum intel_ring_id { |
RING_RENDER = 0x1, |
RING_BSD = 0x2, |
RING_BLT = 0x4, |
} id; |
u32 mmio_base; |
void __iomem *virtual_start; |
struct drm_device *dev; |
struct drm_i915_gem_object *obj; |
u32 head; |
u32 tail; |
int space; |
int size; |
int effective_size; |
struct intel_hw_status_page status_page; |
spinlock_t irq_lock; |
u32 irq_refcount; |
u32 irq_mask; |
u32 irq_seqno; /* last seqno seen at irq time */ |
u32 trace_irq_seqno; |
u32 waiting_seqno; |
u32 sync_seqno[I915_NUM_RINGS-1]; |
bool __must_check (*irq_get)(struct intel_ring_buffer *ring); |
void (*irq_put)(struct intel_ring_buffer *ring); |
int (*init)(struct intel_ring_buffer *ring); |
void (*write_tail)(struct intel_ring_buffer *ring, |
u32 value); |
int __must_check (*flush)(struct intel_ring_buffer *ring, |
u32 invalidate_domains, |
u32 flush_domains); |
int (*add_request)(struct intel_ring_buffer *ring, |
u32 *seqno); |
u32 (*get_seqno)(struct intel_ring_buffer *ring); |
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
u32 offset, u32 length); |
void (*cleanup)(struct intel_ring_buffer *ring); |
/** |
* List of objects currently involved in rendering from the |
* ringbuffer. |
* |
* Includes buffers having the contents of their GPU caches |
* flushed, not necessarily primitives. last_rendering_seqno |
* represents when the rendering involved will be completed. |
* |
* A reference is held on the buffer while on this list. |
*/ |
struct list_head active_list; |
/** |
* List of breadcrumbs associated with GPU requests currently |
* outstanding. |
*/ |
struct list_head request_list; |
/** |
* List of objects currently pending a GPU write flush. |
* |
* All elements on this list will belong to either the |
* active_list or flushing_list, last_rendering_seqno can |
* be used to differentiate between the two elements. |
*/ |
struct list_head gpu_write_list; |
/** |
* Do we have some not yet emitted requests outstanding? |
*/ |
u32 outstanding_lazy_request; |
// wait_queue_head_t irq_queue; |
// drm_local_map_t map; |
void *private; |
}; |
static inline u32 |
intel_ring_sync_index(struct intel_ring_buffer *ring, |
struct intel_ring_buffer *other) |
{ |
int idx; |
/* |
 * cs  -> 0 = vcs, 1 = bcs |
 * vcs -> 0 = bcs, 1 = cs |
 * bcs -> 0 = cs,  1 = vcs |
 */ |
idx = (other - ring) - 1; |
if (idx < 0) |
idx += I915_NUM_RINGS; |
return idx; |
} |
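/* Worked example: with the rings laid out contiguously as RCS = 0, |
 * VCS = 1, BCS = 2 (see the enum at the top of this header), syncing |
 * vcs against cs gives idx = (0 - 1) - 1 = -2, wrapped to -2 + 3 = 1, |
 * matching "vcs -> 1 = cs" in the comment above. |
 */ |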
static inline u32 |
intel_read_status_page(struct intel_ring_buffer *ring, |
int reg) |
{ |
return ioread32(ring->status_page.page_addr + reg); |
} |
/** |
* Reads a dword out of the status page, which is written to from the command |
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or |
* MI_STORE_DATA_IMM. |
* |
* The following dwords have a reserved meaning: |
* 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. |
* 0x04: ring 0 head pointer |
* 0x05: ring 1 head pointer (915-class) |
* 0x06: ring 2 head pointer (915-class) |
* 0x10-0x1b: Context status DWords (GM45) |
* 0x1f: Last written status offset. (GM45) |
* |
* The area from dword 0x20 to 0x3ff is available for driver usage. |
*/ |
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg) |
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) |
#define I915_GEM_HWS_INDEX 0x20 |
#define I915_BREADCRUMB_INDEX 0x21 |
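/* Read-back sketch (assumes LP_RING() resolves to the render ring, as |
 * in the upstream driver): |
 * |
 *	u32 last = READ_BREADCRUMB(dev_priv);	// dword 0x21 of the HWSP, |
 *						// inside the driver-owned |
 *						// 0x20-0x3ff area |
 */ |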
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); |
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring) |
{ |
return intel_wait_ring_buffer(ring, ring->size - 8); |
} |
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); |
static inline void intel_ring_emit(struct intel_ring_buffer *ring, |
u32 data) |
{ |
iowrite32(data, ring->virtual_start + ring->tail); |
ring->tail += 4; |
} |
void intel_ring_advance(struct intel_ring_buffer *ring); |
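/* Typical emission pattern (sketch; MI_FLUSH and MI_NOOP come from |
 * i915_reg.h and are assumed here, not declared in this header): |
 * |
 *	ret = intel_ring_begin(ring, 2);	// reserve two dwords |
 *	if (ret) |
 *		return ret; |
 *	intel_ring_emit(ring, MI_FLUSH); |
 *	intel_ring_emit(ring, MI_NOOP); |
 *	intel_ring_advance(ring);		// expose the new tail to hw |
 */ |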
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
int intel_ring_sync(struct intel_ring_buffer *ring, |
struct intel_ring_buffer *to, |
u32 seqno); |
int intel_init_render_ring_buffer(struct drm_device *dev); |
int intel_init_bsd_ring_buffer(struct drm_device *dev); |
int intel_init_blt_ring_buffer(struct drm_device *dev); |
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
void intel_ring_setup_status_page(struct intel_ring_buffer *ring); |
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) |
{ |
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) |
ring->trace_irq_seqno = seqno; |
} |
/* DRI warts */ |
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size); |
#endif /* _INTEL_RINGBUFFER_H_ */ |
/drivers/video/drm/i915/main.c |
---|
53,6 → 53,7 |
#define PCI_CLASS_REVISION 0x08 |
#define PCI_CLASS_DISPLAY_VGA 0x0300 |
#define PCI_CLASS_BRIDGE_HOST 0x0600 |
#define PCI_CLASS_BRIDGE_ISA 0x0601 |
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn) |
{ |
69,7 → 70,8 |
class >>= 16; |
if( (class == PCI_CLASS_DISPLAY_VGA) || |
(class == PCI_CLASS_BRIDGE_HOST) ) |
(class == PCI_CLASS_BRIDGE_HOST) || |
(class == PCI_CLASS_BRIDGE_ISA)) |
ret = 1; |
} |
return ret; |
/drivers/video/drm/i915/pci.c |
---|
504,7 → 504,7 |
dbgprintf("PCI device %x:%x bus:%x devfn:%x\n", |
dev->pci_dev.vendor, |
dev->pci_dev.device, |
dev->pci_dev.bus, |
dev->pci_dev.busnr, |
dev->pci_dev.devfn); |
} |
569,3 → 569,97 |
} |
return NULL; |
}; |
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
{ |
pci_dev_t *dev; |
for(dev = (pci_dev_t*)devices.next; |
&dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn) |
return &dev->pci_dev; |
} |
return NULL; |
} |
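/* Usage sketch: device 2, function 0 on bus 0 is the usual location of |
 * Intel integrated graphics (an assumption for illustration; PCI_DEVFN |
 * packs slot and function into the devfn byte): |
 * |
 *	struct pci_dev *igd = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0)); |
 */ |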
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
{ |
pci_dev_t *dev; |
dev = (pci_dev_t*)devices.next; |
if(from != NULL) |
{ |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if( &dev->pci_dev == from) |
{ |
dev = (pci_dev_t*)dev->link.next; |
break; |
}; |
} |
}; |
for(; &dev->link != &devices; |
dev = (pci_dev_t*)dev->link.next) |
{ |
if( dev->pci_dev.class == class) |
{ |
return &dev->pci_dev; |
} |
} |
return NULL; |
} |
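/* Enumeration sketch (assumes the stored class uses the same 16-bit |
 * base/sub-class form that pci_scan_filter() in main.c compares |
 * against): |
 * |
 *	struct pci_dev *pdev = NULL; |
 *	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA, pdev)) != NULL) |
 *		handle(pdev);	// handle() is a hypothetical consumer |
 */ |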
#define PIO_OFFSET 0x10000UL |
#define PIO_MASK 0x0ffffUL |
#define PIO_RESERVED 0x40000UL |
#define IO_COND(addr, is_pio, is_mmio) do { \ |
unsigned long port = (unsigned long __force)addr; \ |
if (port >= PIO_RESERVED) { \ |
is_mmio; \ |
} else if (port > PIO_OFFSET) { \ |
port &= PIO_MASK; \ |
is_pio; \ |
} \ |
} while (0) |
/* Create a virtual mapping cookie for an IO port range */ |
void __iomem *ioport_map(unsigned long port, unsigned int nr) |
{ |
if (port > PIO_MASK) |
return NULL; |
return (void __iomem *) (unsigned long) (port + PIO_OFFSET); |
} |
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
{ |
resource_size_t start = pci_resource_start(dev, bar); |
resource_size_t len = pci_resource_len(dev, bar); |
unsigned long flags = pci_resource_flags(dev, bar); |
if (!len || !start) |
return NULL; |
if (maxlen && len > maxlen) |
len = maxlen; |
if (flags & IORESOURCE_IO) |
return ioport_map(start, len); |
if (flags & IORESOURCE_MEM) { |
return ioremap(start, len); |
} |
/* Neither an I/O nor a memory BAR: nothing we can map. */ |
return NULL; |
} |
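/* Mapping sketch: map a whole register BAR, use it, release it. BAR 0 |
 * as the i915 MMIO BAR is an assumption for illustration only. |
 * |
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);	// maxlen 0: whole BAR |
 *	if (regs) { |
 *		... |
 *		pci_iounmap(pdev, regs); |
 *	} |
 */ |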
void pci_iounmap(struct pci_dev *dev, void __iomem * addr) |
{ |
IO_COND(addr, /* nothing */, iounmap(addr)); |
} |