1,5 → 1,6 |
/* |
* Copyright © 2010 Daniel Vetter |
* Copyright © 2011-2014 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
35,58 → 36,75 |
#include "i915_trace.h" |
#include "intel_drv.h" |
|
#define GEN6_PPGTT_PD_ENTRIES 512 |
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) |
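/* |
 * Worked example: with 4-byte gen6 PTEs, I915_PPGTT_PT_ENTRIES is |
 * 4096 / 4 = 1024 entries per page table, so a full gen6 PPGTT spans |
 * 512 PDEs * 1024 PTEs * 4096 bytes = 2GB - exactly the |
 * ppgtt->base.total computed in gen6_ppgtt_init() below. |
 */ |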
typedef uint64_t gen8_gtt_pte_t; |
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; |
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); |
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); |
|
/* PPGTT stuff */ |
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) |
bool intel_enable_ppgtt(struct drm_device *dev, bool full) |
{ |
if (i915.enable_ppgtt == 0) |
return false; |
|
#define GEN6_PDE_VALID (1 << 0) |
/* gen6+ has bits 11:4 for physical addr bits 39:32 */ |
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
if (i915.enable_ppgtt == 1 && full) |
return false; |
|
#define GEN6_PTE_VALID (1 << 0) |
#define GEN6_PTE_UNCACHED (1 << 1) |
#define HSW_PTE_UNCACHED (0) |
#define GEN6_PTE_CACHE_LLC (2 << 1) |
#define GEN7_PTE_CACHE_L3_LLC (3 << 1) |
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) |
return true; |
} |
|
/* Cacheability Control is a 4-bit value. The low three bits are stored in |
* bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. |
*/ |
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ |
(((bits) & 0x8) << (11 - 3))) |
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) |
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) |
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) |
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8) |
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) |
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7) |
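/* |
 * For illustration, expanding one value above: HSW_WB_ELLC_LLC_AGE0 is |
 * HSW_CACHEABILITY_CONTROL(0xb), i.e. |
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) = 0x6 | 0x800 = 0x806, |
 * so the low three bits of the 4-bit value land in PTE bits 3:1 and the |
 * fourth bit lands in PTE bit 11, as described above. |
 */ |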
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) |
{ |
if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) |
return 0; |
|
#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) |
#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) |
#define GEN8_LEGACY_PDPS 4 |
if (enable_ppgtt == 1) |
return 1; |
|
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) |
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ |
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ |
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ |
if (enable_ppgtt == 2 && HAS_PPGTT(dev)) |
return 2; |
|
#ifdef CONFIG_INTEL_IOMMU |
/* Disable ppgtt on SNB if VT-d is on. */ |
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { |
DRM_INFO("Disabling PPGTT because VT-d is on\n"); |
return 0; |
} |
#endif |
|
/* Early VLV doesn't have this */ |
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && |
dev->pdev->revision < 0xb) { |
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); |
return 0; |
} |
|
return HAS_ALIASING_PPGTT(dev) ? 1 : 0; |
} |
|
|
static void ppgtt_bind_vma(struct i915_vma *vma, |
enum i915_cache_level cache_level, |
u32 flags); |
static void ppgtt_unbind_vma(struct i915_vma *vma); |
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt); |
|
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
{ |
gen8_gtt_pte_t pte = valid ? 1 | 2 : 0; |
gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; |
pte |= addr; |
if (level != I915_CACHE_NONE) |
|
switch (level) { |
case I915_CACHE_NONE: |
pte |= PPAT_UNCACHED_INDEX; |
break; |
case I915_CACHE_WT: |
pte |= PPAT_DISPLAY_ELLC_INDEX; |
break; |
default: |
pte |= PPAT_CACHED_INDEX; |
else |
pte |= PPAT_UNCACHED_INDEX; |
break; |
} |
|
return pte; |
} |
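/* |
 * A worked encoding, assuming the standard x86 flag values (_PAGE_PRESENT |
 * = bit 0, _PAGE_RW = bit 1, _PAGE_PWT = bit 3, _PAGE_PCD = bit 4): an |
 * uncached, valid mapping of page address 0x1000 yields |
 * 0x1000 | 0x3 | PPAT_UNCACHED_INDEX = 0x101b, |
 * i.e. present + writable with PWT/PCD selecting the UC entry of the PPAT |
 * programmed in bdw_setup_private_ppat(). |
 */ |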
|
105,7 → 123,7 |
|
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
bool valid, u32 unused) |
{ |
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
127,7 → 145,7 |
|
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
bool valid, u32 unused) |
{ |
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
149,12 → 167,9 |
return pte; |
} |
|
#define BYT_PTE_WRITEABLE (1 << 1) |
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) |
|
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
bool valid, u32 flags) |
{ |
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
162,6 → 177,7 |
/* Mark the page as writable. Other platforms don't have a |
* setting for read-only/writable, so this matches that behavior. |
*/ |
if (!(flags & PTE_READ_ONLY)) |
pte |= BYT_PTE_WRITEABLE; |
|
if (level != I915_CACHE_NONE) |
172,7 → 188,7 |
|
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
bool valid, u32 unused) |
{ |
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
pte |= HSW_PTE_ADDR_ENCODE(addr); |
185,7 → 201,7 |
|
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, |
enum i915_cache_level level, |
bool valid) |
bool valid, u32 unused) |
{ |
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; |
pte |= HSW_PTE_ADDR_ENCODE(addr); |
205,13 → 221,20 |
} |
|
/* Broadwell Page Directory Pointer Descriptors */ |
static int gen8_write_pdp(struct intel_ring_buffer *ring, unsigned entry, |
uint64_t val) |
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry, |
uint64_t val, bool synchronous) |
{ |
struct drm_i915_private *dev_priv = ring->dev->dev_private; |
int ret; |
|
BUG_ON(entry >= 4); |
|
if (synchronous) { |
I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32); |
I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val); |
return 0; |
} |
|
ret = intel_ring_begin(ring, 6); |
if (ret) |
return ret; |
227,48 → 250,37 |
return 0; |
} |
|
static int gen8_ppgtt_enable(struct drm_device *dev) |
static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, |
struct intel_engine_cs *ring, |
bool synchronous) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
int i, j, ret; |
int i, ret; |
|
/* bit of a hack to find the actual last used pd */ |
int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; |
|
for_each_ring(ring, dev_priv, j) { |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
} |
|
for (i = used_pd - 1; i >= 0; i--) { |
dma_addr_t addr = ppgtt->pd_dma_addr[i]; |
for_each_ring(ring, dev_priv, j) { |
ret = gen8_write_pdp(ring, i, addr); |
ret = gen8_write_pdp(ring, i, addr, synchronous); |
if (ret) |
goto err_out; |
return ret; |
} |
} |
|
return 0; |
|
err_out: |
for_each_ring(ring, dev_priv, j) |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); |
return ret; |
} |
|
static void gen8_ppgtt_clear_range(struct i915_address_space *vm, |
unsigned first_entry, |
unsigned num_entries, |
uint64_t start, |
uint64_t length, |
bool use_scratch) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen8_gtt_pte_t *pt_vaddr, scratch_pte; |
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; |
unsigned first_pte = first_entry % GEN8_PTES_PER_PAGE; |
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; |
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; |
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; |
unsigned num_entries = length >> PAGE_SHIFT; |
unsigned last_pte, i; |
|
pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096); |
279,34 → 291,42 |
I915_CACHE_LLC, use_scratch); |
|
while (num_entries) { |
struct page *page_table = &ppgtt->gen8_pt_pages[act_pt]; |
struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde]; |
|
last_pte = first_pte + num_entries; |
last_pte = pte + num_entries; |
if (last_pte > GEN8_PTES_PER_PAGE) |
last_pte = GEN8_PTES_PER_PAGE; |
|
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3); |
MapPage(pt_vaddr,(addr_t)page_table, PG_SW); |
|
for (i = first_pte; i < last_pte; i++) |
for (i = pte; i < last_pte; i++) { |
pt_vaddr[i] = scratch_pte; |
num_entries--; |
} |
|
num_entries -= last_pte - first_pte; |
first_pte = 0; |
act_pt++; |
if (!HAS_LLC(ppgtt->base.dev)) |
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); |
|
pte = 0; |
if (++pde == GEN8_PDES_PER_PAGE) { |
pdpe++; |
pde = 0; |
} |
} |
FreeKernelSpace(pt_vaddr); |
} |
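/* |
 * The pdpe/pde/pte split above just slices the GPU address; assuming the |
 * usual shift values from the header (GEN8_PDPE_SHIFT = 30, GEN8_PDE_SHIFT |
 * = 21, GEN8_PTE_SHIFT = 12, 9-bit PDE/PTE masks), address 0x40305000 |
 * decomposes as pdpe = 1, pde = 1, pte = 0x105: page table 1 of page |
 * directory 1, entry 261. |
 */ |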
|
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
uint64_t start, |
enum i915_cache_level cache_level, u32 unused) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen8_gtt_pte_t *pt_vaddr; |
unsigned act_pt = first_entry / GEN8_PTES_PER_PAGE; |
unsigned act_pte = first_entry % GEN8_PTES_PER_PAGE; |
unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; |
unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; |
unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; |
struct sg_page_iter sg_iter; |
|
pt_vaddr = AllocKernelSpace(4096); |
313,157 → 333,317 |
if(pt_vaddr == NULL) |
return; |
|
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3); |
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3); |
|
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) { |
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS)) |
break; |
|
pt_vaddr[act_pte] = |
pt_vaddr[pte] = |
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), |
cache_level, true); |
if (++act_pte == GEN8_PTES_PER_PAGE) { |
act_pt++; |
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3); |
act_pte = 0; |
if (++pte == GEN8_PTES_PER_PAGE) { |
if (!HAS_LLC(ppgtt->base.dev)) |
drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); |
if (++pde == GEN8_PDES_PER_PAGE) { |
pdpe++; |
pde = 0; |
} |
pte = 0; |
MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3); |
} |
} |
FreeKernelSpace(pt_vaddr); |
} |
|
static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
static void gen8_free_page_tables(struct page **pt_pages) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
int i, j; |
int i; |
|
drm_mm_takedown(&vm->mm); |
if (pt_pages == NULL) |
return; |
|
// for (i = 0; i < GEN8_PDES_PER_PAGE; i++) |
// if (pt_pages[i]) |
// __free_pages(pt_pages[i], 0); |
} |
|
static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt) |
{ |
int i; |
|
for (i = 0; i < ppgtt->num_pd_pages ; i++) { |
if (ppgtt->pd_dma_addr[i]) { |
pci_unmap_page(ppgtt->base.dev->pdev, |
ppgtt->pd_dma_addr[i], |
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
gen8_free_page_tables(ppgtt->gen8_pt_pages[i]); |
kfree(ppgtt->gen8_pt_pages[i]); |
kfree(ppgtt->gen8_pt_dma_addr[i]); |
} |
|
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); |
} |
|
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) |
{ |
struct pci_dev *hwdev = ppgtt->base.dev->pdev; |
int i, j; |
|
for (i = 0; i < ppgtt->num_pd_pages; i++) { |
/* TODO: In the future we'll support sparse mappings, so this |
* will have to change. */ |
if (!ppgtt->pd_dma_addr[i]) |
continue; |
|
pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE, |
PCI_DMA_BIDIRECTIONAL); |
|
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { |
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; |
if (addr) |
pci_unmap_page(ppgtt->base.dev->pdev, |
addr, |
PAGE_SIZE, |
pci_unmap_page(hwdev, addr, PAGE_SIZE, |
PCI_DMA_BIDIRECTIONAL); |
} |
} |
} |
|
static void gen8_ppgtt_cleanup(struct i915_address_space *vm) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
|
list_del(&vm->global_link); |
drm_mm_takedown(&vm->mm); |
|
gen8_ppgtt_unmap_pages(ppgtt); |
gen8_ppgtt_free(ppgtt); |
} |
|
static struct page **__gen8_alloc_page_tables(void) |
{ |
struct page **pt_pages; |
int i; |
|
pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL); |
if (!pt_pages) |
return ERR_PTR(-ENOMEM); |
|
for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { |
pt_pages[i] = alloc_page(GFP_KERNEL); |
if (!pt_pages[i]) |
goto bail; |
} |
|
return pt_pages; |
|
bail: |
gen8_free_page_tables(pt_pages); |
kfree(pt_pages); |
return ERR_PTR(-ENOMEM); |
} |
|
// __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT)); |
// __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); |
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt, |
const int max_pdp) |
{ |
struct page **pt_pages[GEN8_LEGACY_PDPS]; |
int i, ret; |
|
for (i = 0; i < max_pdp; i++) { |
pt_pages[i] = __gen8_alloc_page_tables(); |
if (IS_ERR(pt_pages[i])) { |
ret = PTR_ERR(pt_pages[i]); |
goto unwind_out; |
} |
} |
|
/** |
* GEN8 legacy ppgtt programming is accomplished through 4 PDP registers with a |
* net effect resembling a 2-level page table in normal x86 terms. Each PDP |
* represents 1GB of memory |
* 4 * 512 * 512 * 4096 = 4GB legacy 32b address space. |
* |
* TODO: Do something with the size parameter |
**/ |
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) |
/* NB: Avoid touching gen8_pt_pages until last to keep the allocation |
* "atomic" - for cleanup purposes. |
*/ |
for (i = 0; i < max_pdp; i++) |
ppgtt->gen8_pt_pages[i] = pt_pages[i]; |
|
return 0; |
|
unwind_out: |
while (i--) { |
gen8_free_page_tables(pt_pages[i]); |
kfree(pt_pages[i]); |
} |
|
return ret; |
} |
|
static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) |
{ |
struct page *pt_pages; |
int i, j, ret = -ENOMEM; |
const int max_pdp = DIV_ROUND_UP(size, 1 << 30); |
const int num_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; |
int i; |
|
if (size % (1<<30)) |
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); |
for (i = 0; i < ppgtt->num_pd_pages; i++) { |
ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, |
sizeof(dma_addr_t), |
GFP_KERNEL); |
if (!ppgtt->gen8_pt_dma_addr[i]) |
return -ENOMEM; |
} |
|
/* FIXME: split allocation into smaller pieces. For now we only ever do |
* this once, but with full PPGTT, the multiple contiguous allocations |
* will be bad. |
*/ |
ppgtt->pd_pages = AllocPages(max_pdp); |
return 0; |
} |
|
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, |
const int max_pdp) |
{ |
// ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); |
if (!ppgtt->pd_pages) |
return -ENOMEM; |
|
pt_pages = AllocPages(num_pt_pages); |
if (!pt_pages) { |
// ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); |
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); |
|
return 0; |
} |
|
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, |
const int max_pdp) |
{ |
int ret; |
|
ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp); |
if (ret) |
return ret; |
|
ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp); |
if (ret) { |
// __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); |
return -ENOMEM; |
return ret; |
} |
|
ppgtt->gen8_pt_pages = pt_pages; |
ppgtt->num_pd_pages = max_pdp; |
ppgtt->num_pt_pages = num_pt_pages; |
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; |
ppgtt->enable = gen8_ppgtt_enable; |
ppgtt->base.clear_range = gen8_ppgtt_clear_range; |
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; |
ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
ppgtt->base.start = 0; |
ppgtt->base.total = ppgtt->num_pt_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE; |
|
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); |
ret = gen8_ppgtt_allocate_dma(ppgtt); |
if (ret) |
gen8_ppgtt_free(ppgtt); |
|
/* |
* - Create a mapping for the page directories. |
* - For each page directory: |
* allocate space for page table mappings. |
* map each page table |
*/ |
for (i = 0; i < max_pdp; i++) { |
dma_addr_t temp; |
temp = pci_map_page(ppgtt->base.dev->pdev, |
&ppgtt->pd_pages[i], 0, |
return ret; |
} |
|
static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, |
const int pd) |
{ |
dma_addr_t pd_addr; |
int ret; |
|
pd_addr = pci_map_page(ppgtt->base.dev->pdev, |
&ppgtt->pd_pages[pd], 0, |
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
|
ppgtt->pd_dma_addr[i] = temp; |
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); |
// if (ret) |
// return ret; |
|
ppgtt->gen8_pt_dma_addr[i] = kmalloc(sizeof(dma_addr_t) * GEN8_PDES_PER_PAGE, GFP_KERNEL); |
if (!ppgtt->gen8_pt_dma_addr[i]) |
goto err_out; |
ppgtt->pd_dma_addr[pd] = pd_addr; |
|
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { |
struct page *p = &pt_pages[i * GEN8_PDES_PER_PAGE + j]; |
temp = pci_map_page(ppgtt->base.dev->pdev, |
p, 0, PAGE_SIZE, |
PCI_DMA_BIDIRECTIONAL); |
return 0; |
} |
|
ppgtt->gen8_pt_dma_addr[i][j] = temp; |
static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, |
const int pd, |
const int pt) |
{ |
dma_addr_t pt_addr; |
struct page *p; |
int ret; |
|
p = ppgtt->gen8_pt_pages[pd][pt]; |
pt_addr = pci_map_page(ppgtt->base.dev->pdev, |
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
// ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); |
// if (ret) |
// return ret; |
|
ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; |
|
return 0; |
} |
|
/** |
* GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP |
* registers, with a net effect resembling a 2-level page table in normal x86 |
* terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy |
* 32b address space. |
* |
* FIXME: split allocation into smaller pieces. For now we only ever do this |
* once, but with full PPGTT, the multiple contiguous allocations will be bad. |
* TODO: Do something with the size parameter |
*/ |
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) |
{ |
const int max_pdp = DIV_ROUND_UP(size, 1 << 30); |
const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; |
int i, j, ret; |
gen8_ppgtt_pde_t *pd_vaddr; |
|
if (size % (1<<30)) |
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); |
|
/* 1. Do all our allocations for page directories and page tables. */ |
ret = gen8_ppgtt_alloc(ppgtt, max_pdp); |
if (ret) |
return ret; |
|
/* |
* 2. Create DMA mappings for the page directories and page tables. |
*/ |
for (i = 0; i < max_pdp; i++) { |
ret = gen8_ppgtt_setup_page_directories(ppgtt, i); |
if (ret) |
goto bail; |
|
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { |
ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); |
if (ret) |
goto bail; |
} |
} |
|
/* For now, the PPGTT helper functions all require that the PDEs are |
/* |
* 3. Map all the page directory entries to point to the page tables |
* we've allocated. |
* |
* For now, the PPGTT helper functions all require that the PDEs are |
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we |
* will never need to touch the PDEs again */ |
* will never need to touch the PDEs again. |
*/ |
|
gen8_ppgtt_pde_t *pd_vaddr; |
pd_vaddr = AllocKernelSpace(4096); |
|
for (i = 0; i < max_pdp; i++) { |
MapPage(pd_vaddr,(addr_t)(ppgtt->pd_pages[i]), 3); |
MapPage(pd_vaddr,(addr_t)(&ppgtt->pd_pages[i]), 3); |
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { |
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; |
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, |
I915_CACHE_LLC); |
} |
if (!HAS_LLC(ppgtt->base.dev)) |
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE); |
} |
FreeKernelSpace(pd_vaddr); |
|
ppgtt->base.clear_range(&ppgtt->base, 0, |
ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE, |
true); |
ppgtt->enable = gen8_ppgtt_enable; |
ppgtt->switch_mm = gen8_mm_switch; |
ppgtt->base.clear_range = gen8_ppgtt_clear_range; |
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; |
ppgtt->base.cleanup = gen8_ppgtt_cleanup; |
ppgtt->base.start = 0; |
ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE; |
|
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
|
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", |
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); |
DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n", |
ppgtt->num_pt_pages, |
(ppgtt->num_pt_pages - num_pt_pages) + |
size % (1<<30)); |
ppgtt->num_pd_entries, |
(ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30)); |
return 0; |
|
err_out: |
ppgtt->base.cleanup(&ppgtt->base); |
bail: |
gen8_ppgtt_unmap_pages(ppgtt); |
gen8_ppgtt_free(ppgtt); |
return ret; |
} |
|
489,38 → 669,163 |
readl(pd_addr); |
} |
|
static int gen6_ppgtt_enable(struct drm_device *dev) |
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t pd_offset; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
int i; |
|
BUG_ON(ppgtt->pd_offset & 0x3f); |
|
gen6_write_pdes(ppgtt); |
return (ppgtt->pd_offset / 64) << 16; |
} |
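/* |
 * PP_DIR_BASE expects the page directory offset in GGTT cachelines, |
 * placed in the upper 16 bits of the register - hence the BUG_ON above |
 * that pd_offset is 64-byte aligned. For example, a pd_offset of |
 * 0x1ff000 encodes as (0x1ff000 / 64) << 16 = 0x7fc00000. |
 */ |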
|
pd_offset = ppgtt->pd_offset; |
pd_offset /= 64; /* in cachelines, */ |
pd_offset <<= 16; |
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, |
struct intel_engine_cs *ring, |
bool synchronous) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
if (INTEL_INFO(dev)->gen == 6) { |
uint32_t ecochk, gab_ctl, ecobits; |
/* If we're in reset, we can assume the GPU is sufficiently idle to |
* manually frob these bits. Ideally we could use the ring functions, |
* except our error handling makes it quite difficult (can't use |
* intel_ring_begin, ring->flush, or intel_ring_advance) |
* |
* FIXME: We should try not to special case reset |
*/ |
if (synchronous || |
i915_reset_in_progress(&dev_priv->gpu_error)) { |
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt); |
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); |
POSTING_READ(RING_PP_DIR_BASE(ring)); |
return 0; |
} |
|
ecobits = I915_READ(GAC_ECO_BITS); |
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | |
ECOBITS_PPGTT_CACHE64B); |
/* NB: TLBs must be flushed and invalidated before a switch */ |
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
if (ret) |
return ret; |
|
gab_ctl = I915_READ(GAB_CTL); |
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); |
ret = intel_ring_begin(ring, 6); |
if (ret) |
return ret; |
|
ecochk = I915_READ(GAM_ECOCHK); |
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | |
ECOCHK_PPGTT_CACHE64B); |
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
} else if (INTEL_INFO(dev)->gen >= 7) { |
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); |
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); |
intel_ring_emit(ring, PP_DIR_DCLV_2G); |
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); |
intel_ring_emit(ring, get_pd_offset(ppgtt)); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
|
return 0; |
} |
|
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, |
struct intel_engine_cs *ring, |
bool synchronous) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
/* If we're in reset, we can assume the GPU is sufficiently idle to |
* manually frob these bits. Ideally we could use the ring functions, |
* except our error handling makes it quite difficult (can't use |
* intel_ring_begin, ring->flush, or intel_ring_advance) |
* |
* FIXME: We should try not to special case reset |
*/ |
if (synchronous || |
i915_reset_in_progress(&dev_priv->gpu_error)) { |
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt); |
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); |
POSTING_READ(RING_PP_DIR_BASE(ring)); |
return 0; |
} |
|
/* NB: TLBs must be flushed and invalidated before a switch */ |
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
if (ret) |
return ret; |
|
ret = intel_ring_begin(ring, 6); |
if (ret) |
return ret; |
|
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); |
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); |
intel_ring_emit(ring, PP_DIR_DCLV_2G); |
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); |
intel_ring_emit(ring, get_pd_offset(ppgtt)); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
|
/* XXX: RCS is the only one to auto invalidate the TLBs? */ |
if (ring->id != RCS) { |
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); |
if (ret) |
return ret; |
} |
|
return 0; |
} |
|
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, |
struct intel_engine_cs *ring, |
bool synchronous) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
|
if (!synchronous) |
return 0; |
|
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); |
|
POSTING_READ(RING_PP_DIR_DCLV(ring)); |
|
return 0; |
} |
|
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_engine_cs *ring; |
int j, ret; |
|
for_each_ring(ring, dev_priv, j) { |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
|
/* We promise to do a switch later with FULL PPGTT. If this is |
* aliasing, this is the one and only switch we'll do */ |
if (USES_FULL_PPGTT(dev)) |
continue; |
|
ret = ppgtt->switch_mm(ppgtt, ring, true); |
if (ret) |
goto err_out; |
} |
|
return 0; |
|
err_out: |
for_each_ring(ring, dev_priv, j) |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE)); |
return ret; |
} |
|
static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_engine_cs *ring; |
uint32_t ecochk, ecobits; |
int i; |
|
ecobits = I915_READ(GAC_ECO_BITS); |
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); |
533,34 → 838,71 |
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; |
} |
I915_WRITE(GAM_ECOCHK, ecochk); |
/* GFX_MODE is per-ring on gen7+ */ |
} |
|
for_each_ring(ring, dev_priv, i) { |
if (INTEL_INFO(dev)->gen >= 7) |
int ret; |
/* GFX_MODE is per-ring on gen7+ */ |
I915_WRITE(RING_MODE_GEN7(ring), |
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
|
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); |
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset); |
/* We promise to do a switch later with FULL PPGTT. If this is |
* aliasing, this is the one and only switch we'll do */ |
if (USES_FULL_PPGTT(dev)) |
continue; |
|
ret = ppgtt->switch_mm(ppgtt, ring, true); |
if (ret) |
return ret; |
} |
|
return 0; |
} |
|
static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_engine_cs *ring; |
uint32_t ecochk, gab_ctl, ecobits; |
int i; |
|
ecobits = I915_READ(GAC_ECO_BITS); |
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | |
ECOBITS_PPGTT_CACHE64B); |
|
gab_ctl = I915_READ(GAB_CTL); |
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); |
|
ecochk = I915_READ(GAM_ECOCHK); |
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); |
|
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
|
for_each_ring(ring, dev_priv, i) { |
int ret = ppgtt->switch_mm(ppgtt, ring, true); |
if (ret) |
return ret; |
} |
|
return 0; |
} |
|
/* PPGTT support for Sandybridge/Gen6 and later */ |
static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
unsigned first_entry, |
unsigned num_entries, |
uint64_t start, |
uint64_t length, |
bool use_scratch) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen6_gtt_pte_t *pt_vaddr, scratch_pte; |
unsigned first_entry = start >> PAGE_SHIFT; |
unsigned num_entries = length >> PAGE_SHIFT; |
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned last_pte, i; |
|
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true); |
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); |
|
pt_vaddr = AllocKernelSpace(4096); |
|
587,18 → 929,17 |
|
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
uint64_t start, |
enum i915_cache_level cache_level, u32 flags) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen6_gtt_pte_t *pt_vaddr; |
unsigned first_entry = start >> PAGE_SHIFT; |
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
struct sg_page_iter sg_iter; |
dma_addr_t page_addr; |
|
|
pt_vaddr = AllocKernelSpace(4096); |
|
if(pt_vaddr == NULL) |
609,25 → 950,21 |
|
pt_vaddr[act_pte] = |
vm->pte_encode(sg_page_iter_dma_address(&sg_iter), |
cache_level, true); |
cache_level, true, flags); |
|
if (++act_pte == I915_PPGTT_PT_ENTRIES) { |
act_pt++; |
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3); |
act_pte = 0; |
|
} |
} |
FreeKernelSpace(pt_vaddr); |
} |
|
static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
int i; |
|
drm_mm_takedown(&ppgtt->base.mm); |
|
if (ppgtt->pt_dma_addr) { |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
pci_unmap_page(ppgtt->base.dev->pdev, |
634,52 → 971,120 |
ppgtt->pt_dma_addr[i], |
4096, PCI_DMA_BIDIRECTIONAL); |
} |
} |
|
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) |
{ |
int i; |
|
kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
__free_page(ppgtt->pt_pages[i]); |
kfree(ppgtt->pt_pages); |
kfree(ppgtt); |
} |
|
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
|
list_del(&vm->global_link); |
drm_mm_takedown(&ppgtt->base.mm); |
drm_mm_remove_node(&ppgtt->node); |
|
gen6_ppgtt_unmap_pages(ppgtt); |
gen6_ppgtt_free(ppgtt); |
} |
|
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned first_pd_entry_in_global_pt; |
int i; |
int ret = -ENOMEM; |
bool retried = false; |
int ret; |
|
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 |
* entries. For aliasing ppgtt support we just steal them at the end for |
* now. */ |
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); |
/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The |
* allocator works in address space sizes, so it's multiplied by page |
* size. We allocate at the top of the GTT to avoid fragmentation. |
*/ |
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); |
alloc: |
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, |
&ppgtt->node, GEN6_PD_SIZE, |
GEN6_PD_ALIGN, 0, |
0, dev_priv->gtt.base.total, |
DRM_MM_TOPDOWN); |
if (ret == -ENOSPC && !retried) { |
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, |
GEN6_PD_SIZE, GEN6_PD_ALIGN, |
I915_CACHE_NONE, |
0, dev_priv->gtt.base.total, |
0); |
if (ret) |
return ret; |
|
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; |
retried = true; |
goto alloc; |
} |
|
if (ppgtt->node.start < dev_priv->gtt.mappable_end) |
DRM_DEBUG("Forced to use aperture for PDEs\n"); |
|
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; |
ppgtt->enable = gen6_ppgtt_enable; |
ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
ppgtt->base.scratch = dev_priv->gtt.base.scratch; |
ppgtt->base.start = 0; |
ppgtt->base.total = GEN6_PPGTT_PD_ENTRIES * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; |
return ret; |
} |
|
static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) |
{ |
int i; |
|
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *), |
GFP_KERNEL); |
|
if (!ppgtt->pt_pages) |
return -ENOMEM; |
|
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); |
if (!ppgtt->pt_pages[i]) |
goto err_pt_alloc; |
if (!ppgtt->pt_pages[i]) { |
gen6_ppgtt_free(ppgtt); |
return -ENOMEM; |
} |
} |
|
return 0; |
} |
|
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) |
{ |
int ret; |
|
ret = gen6_ppgtt_allocate_page_directories(ppgtt); |
if (ret) |
return ret; |
|
ret = gen6_ppgtt_allocate_page_tables(ppgtt); |
if (ret) { |
drm_mm_remove_node(&ppgtt->node); |
return ret; |
} |
|
ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t), |
GFP_KERNEL); |
if (!ppgtt->pt_dma_addr) |
goto err_pt_alloc; |
if (!ppgtt->pt_dma_addr) { |
drm_mm_remove_node(&ppgtt->node); |
gen6_ppgtt_free(ppgtt); |
return -ENOMEM; |
} |
|
return 0; |
} |
|
static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
int i; |
|
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
dma_addr_t pt_addr; |
|
686,44 → 1091,72 |
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, |
PCI_DMA_BIDIRECTIONAL); |
|
// if (pci_dma_mapping_error(dev->pdev, pt_addr)) { |
// gen6_ppgtt_unmap_pages(ppgtt); |
// return -EIO; |
// } |
|
ppgtt->pt_dma_addr[i] = pt_addr; |
} |
|
ppgtt->base.clear_range(&ppgtt->base, 0, |
ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true); |
return 0; |
} |
|
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); |
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
return 0; |
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; |
if (IS_GEN6(dev)) { |
ppgtt->enable = gen6_ppgtt_enable; |
ppgtt->switch_mm = gen6_mm_switch; |
} else if (IS_HASWELL(dev)) { |
ppgtt->enable = gen7_ppgtt_enable; |
ppgtt->switch_mm = hsw_mm_switch; |
} else if (IS_GEN7(dev)) { |
ppgtt->enable = gen7_ppgtt_enable; |
ppgtt->switch_mm = gen7_mm_switch; |
} else |
BUG(); |
|
err_pd_pin: |
if (ppgtt->pt_dma_addr) { |
for (i--; i >= 0; i--) |
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], |
4096, PCI_DMA_BIDIRECTIONAL); |
} |
err_pt_alloc: |
kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
if (ppgtt->pt_pages[i]) |
__free_page(ppgtt->pt_pages[i]); |
} |
kfree(ppgtt->pt_pages); |
ret = gen6_ppgtt_alloc(ppgtt); |
if (ret) |
return ret; |
|
ret = gen6_ppgtt_setup_page_tables(ppgtt); |
if (ret) { |
gen6_ppgtt_free(ppgtt); |
return ret; |
} |
|
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) |
ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
ppgtt->base.start = 0; |
ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; |
// ppgtt->debug_dump = gen6_dump_ppgtt; |
|
ppgtt->pd_offset = |
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); |
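/* |
 * Worked example: node.start is a byte address inside the GGTT, so |
 * node.start / PAGE_SIZE is the index of the first stolen GTT entry, and |
 * multiplying by the 4-byte PTE size gives the byte offset of the PDEs |
 * within the global page table, e.g. a node at 0x7fe00000 yields |
 * (0x7fe00000 / 4096) * 4 = 0x1ff800. |
 */ |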
|
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); |
|
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", |
ppgtt->node.size >> 20, |
ppgtt->node.start / PAGE_SIZE); |
|
return 0; |
} |
|
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_ppgtt *ppgtt; |
int ret; |
int ret = 0; |
|
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); |
if (!ppgtt) |
return -ENOMEM; |
|
ppgtt->base.dev = dev; |
ppgtt->base.scratch = dev_priv->gtt.base.scratch; |
|
if (INTEL_INFO(dev)->gen < 8) |
ret = gen6_ppgtt_init(ppgtt); |
732,44 → 1165,40 |
else |
BUG(); |
|
if (ret) |
kfree(ppgtt); |
else { |
dev_priv->mm.aliasing_ppgtt = ppgtt; |
if (!ret) { |
struct drm_i915_private *dev_priv = dev->dev_private; |
kref_init(&ppgtt->ref); |
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, |
ppgtt->base.total); |
i915_init_vm(dev_priv, &ppgtt->base); |
if (INTEL_INFO(dev)->gen < 8) { |
gen6_write_pdes(ppgtt); |
DRM_DEBUG("Adding PPGTT at offset %x\n", |
ppgtt->pd_offset << 10); |
} |
} |
|
return ret; |
} |
|
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) |
static void |
ppgtt_bind_vma(struct i915_vma *vma, |
enum i915_cache_level cache_level, |
u32 flags) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
/* Currently applicable only to VLV */ |
if (vma->obj->gt_ro) |
flags |= PTE_READ_ONLY; |
|
if (!ppgtt) |
return; |
|
ppgtt->base.cleanup(&ppgtt->base); |
dev_priv->mm.aliasing_ppgtt = NULL; |
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start, |
cache_level, flags); |
} |
|
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
static void ppgtt_unbind_vma(struct i915_vma *vma) |
{ |
ppgtt->base.insert_entries(&ppgtt->base, obj->pages, |
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
cache_level); |
} |
|
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj) |
{ |
ppgtt->base.clear_range(&ppgtt->base, |
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
obj->base.size >> PAGE_SHIFT, |
vma->vm->clear_range(vma->vm, |
vma->node.start, |
vma->obj->base.size, |
true); |
} |
|
814,7 → 1243,7 |
void i915_check_and_clear_faults(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring; |
struct intel_engine_cs *ring; |
int i; |
|
if (INTEL_INFO(dev)->gen < 6) |
853,9 → 1282,9 |
i915_check_and_clear_faults(dev); |
|
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
dev_priv->gtt.base.start / PAGE_SIZE, |
dev_priv->gtt.base.total / PAGE_SIZE, |
false); |
dev_priv->gtt.base.start, |
dev_priv->gtt.base.total, |
true); |
} |
|
void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
862,20 → 1291,52 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_gem_object *obj; |
struct i915_address_space *vm; |
|
i915_check_and_clear_faults(dev); |
|
/* First fill our portion of the GTT with scratch pages */ |
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
dev_priv->gtt.base.start / PAGE_SIZE, |
dev_priv->gtt.base.total / PAGE_SIZE, |
dev_priv->gtt.base.start, |
dev_priv->gtt.base.total, |
true); |
|
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
struct i915_vma *vma = i915_gem_obj_to_vma(obj, |
&dev_priv->gtt.base); |
if (!vma) |
continue; |
|
i915_gem_clflush_object(obj, obj->pin_display); |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
/* The bind_vma code tries to be smart about tracking mappings. |
* Unfortunately above, we've just wiped out the mappings |
* without telling our object about it. So we need to fake it. |
*/ |
obj->has_global_gtt_mapping = 0; |
vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); |
} |
|
|
if (INTEL_INFO(dev)->gen >= 8) { |
if (IS_CHERRYVIEW(dev)) |
chv_setup_private_ppat(dev_priv); |
else |
bdw_setup_private_ppat(dev_priv); |
|
return; |
} |
|
list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
/* TODO: Perhaps it shouldn't be gen6 specific */ |
if (i915_is_ggtt(vm)) { |
if (dev_priv->mm.aliasing_ppgtt) |
gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); |
continue; |
} |
|
gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); |
} |
|
i915_gem_chipset_flush(dev); |
} |
|
904,15 → 1365,16 |
|
static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *st, |
unsigned int first_entry, |
enum i915_cache_level level) |
uint64_t start, |
enum i915_cache_level level, u32 unused) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
unsigned first_entry = start >> PAGE_SHIFT; |
gen8_gtt_pte_t __iomem *gtt_entries = |
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
int i = 0; |
struct sg_page_iter sg_iter; |
dma_addr_t addr; |
dma_addr_t addr = 0; /* shut up gcc */ |
|
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
addr = sg_dma_address(sg_iter.sg) + |
949,19 → 1411,20 |
*/ |
static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *st, |
unsigned int first_entry, |
enum i915_cache_level level) |
uint64_t start, |
enum i915_cache_level level, u32 flags) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
unsigned first_entry = start >> PAGE_SHIFT; |
gen6_gtt_pte_t __iomem *gtt_entries = |
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
int i = 0; |
struct sg_page_iter sg_iter; |
dma_addr_t addr; |
dma_addr_t addr = 0; |
|
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
addr = sg_page_iter_dma_address(&sg_iter); |
iowrite32(vm->pte_encode(addr, level, true), >t_entries[i]); |
iowrite32(vm->pte_encode(addr, level, true, flags), >t_entries[i]); |
i++; |
} |
|
971,9 → 1434,10 |
* of NUMA access patterns. Therefore, even with the way we assume |
* hardware should work, we must keep this posting read for paranoia. |
*/ |
if (i != 0) |
WARN_ON(readl(>t_entries[i-1]) != |
vm->pte_encode(addr, level, true)); |
if (i != 0) { |
unsigned long gtt = readl(>t_entries[i-1]); |
WARN_ON(gtt != vm->pte_encode(addr, level, true, flags)); |
} |
|
/* This next bit makes the above posting read even more important. We |
* want to flush the TLBs only after we're certain all the PTE updates |
984,11 → 1448,13 |
} |
|
static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
unsigned int first_entry, |
unsigned int num_entries, |
uint64_t start, |
uint64_t length, |
bool use_scratch) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
unsigned first_entry = start >> PAGE_SHIFT; |
unsigned num_entries = length >> PAGE_SHIFT; |
gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = |
(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
1008,11 → 1474,13 |
} |
|
static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
unsigned int first_entry, |
unsigned int num_entries, |
uint64_t start, |
uint64_t length, |
bool use_scratch) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
unsigned first_entry = start >> PAGE_SHIFT; |
unsigned num_entries = length >> PAGE_SHIFT; |
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = |
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
1023,7 → 1491,7 |
first_entry, num_entries, max_entries)) |
num_entries = max_entries; |
|
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch); |
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0); |
|
for (i = 0; i < num_entries; i++) |
iowrite32(scratch_pte, >t_base[i]); |
1030,55 → 1498,109 |
readl(gtt_base); |
} |
|
static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level) |
|
static void i915_ggtt_bind_vma(struct i915_vma *vma, |
enum i915_cache_level cache_level, |
u32 unused) |
{ |
const unsigned long entry = vma->node.start >> PAGE_SHIFT; |
unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
|
intel_gtt_insert_sg_entries(st, pg_start, flags); |
|
BUG_ON(!i915_is_ggtt(vma->vm)); |
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags); |
vma->obj->has_global_gtt_mapping = 1; |
} |
|
static void i915_ggtt_clear_range(struct i915_address_space *vm, |
unsigned int first_entry, |
unsigned int num_entries, |
uint64_t start, |
uint64_t length, |
bool unused) |
{ |
unsigned first_entry = start >> PAGE_SHIFT; |
unsigned num_entries = length >> PAGE_SHIFT; |
intel_gtt_clear_range(first_entry, num_entries); |
} |
|
static void i915_ggtt_unbind_vma(struct i915_vma *vma) |
{ |
const unsigned int first = vma->node.start >> PAGE_SHIFT; |
const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; |
|
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
BUG_ON(!i915_is_ggtt(vma->vm)); |
vma->obj->has_global_gtt_mapping = 0; |
intel_gtt_clear_range(first, size); |
} |
|
static void ggtt_bind_vma(struct i915_vma *vma, |
enum i915_cache_level cache_level, |
u32 flags) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_device *dev = vma->vm->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; |
struct drm_i915_gem_object *obj = vma->obj; |
|
dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, |
entry, |
cache_level); |
/* Currently applicable only to VLV */ |
if (obj->gt_ro) |
flags |= PTE_READ_ONLY; |
|
/* If there is no aliasing PPGTT, or the caller needs a global mapping, |
* or we have a global mapping already but the cacheability flags have |
* changed, set the global PTEs. |
* |
* If there is an aliasing PPGTT it is anecdotally faster, so use that |
* instead if none of the above hold true. |
* |
* NB: A global mapping should only be needed for special regions like |
* "gtt mappable", SNB errata, or if specified via special execbuf |
* flags. At all other times, the GPU will use the aliasing PPGTT. |
*/ |
if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { |
if (!obj->has_global_gtt_mapping || |
(cache_level != obj->cache_level)) { |
vma->vm->insert_entries(vma->vm, obj->pages, |
vma->node.start, |
cache_level, flags); |
obj->has_global_gtt_mapping = 1; |
} |
} |
|
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
if (dev_priv->mm.aliasing_ppgtt && |
(!obj->has_aliasing_ppgtt_mapping || |
(cache_level != obj->cache_level))) { |
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; |
appgtt->base.insert_entries(&appgtt->base, |
vma->obj->pages, |
vma->node.start, |
cache_level, flags); |
vma->obj->has_aliasing_ppgtt_mapping = 1; |
} |
} |
|
static void ggtt_unbind_vma(struct i915_vma *vma) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_device *dev = vma->vm->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; |
struct drm_i915_gem_object *obj = vma->obj; |
|
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
entry, |
obj->base.size >> PAGE_SHIFT, |
if (obj->has_global_gtt_mapping) { |
vma->vm->clear_range(vma->vm, |
vma->node.start, |
obj->base.size, |
true); |
|
obj->has_global_gtt_mapping = 0; |
} |
|
if (obj->has_aliasing_ppgtt_mapping) { |
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; |
appgtt->base.clear_range(&appgtt->base, |
vma->node.start, |
obj->base.size, |
true); |
obj->has_aliasing_ppgtt_mapping = 0; |
} |
} |
|
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
1158,31 → 1680,16 |
|
/* Clear any non-preallocated blocks */ |
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { |
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; |
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
hole_start, hole_end); |
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true); |
ggtt_vm->clear_range(ggtt_vm, hole_start, |
hole_end - hole_start, true); |
} |
|
/* And finally clear the reserved guard page */ |
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true); |
ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); |
} |
|
static bool |
intel_enable_ppgtt(struct drm_device *dev) |
{ |
if (i915_enable_ppgtt >= 0) |
return i915_enable_ppgtt; |
|
#ifdef CONFIG_INTEL_IOMMU |
/* Disable ppgtt on SNB if VT-d is on. */ |
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) |
return false; |
#endif |
|
return true; |
} |
|
void i915_gem_init_global_gtt(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
1191,28 → 1698,8 |
gtt_size = dev_priv->gtt.base.total; |
mappable_size = dev_priv->gtt.mappable_end; |
|
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
int ret; |
|
if (INTEL_INFO(dev)->gen <= 7) { |
/* PPGTT pdes are stolen from global gtt ptes, so shrink the |
* aperture accordingly when using aliasing ppgtt. */ |
gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; |
} |
|
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
|
ret = i915_gem_init_aliasing_ppgtt(dev); |
if (!ret) |
return; |
|
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); |
drm_mm_takedown(&dev_priv->gtt.base.mm); |
if (INTEL_INFO(dev)->gen < 8) |
gtt_size += GEN6_PPGTT_PD_ENTRIES*PAGE_SIZE; |
} |
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); |
} |
|
static int setup_scratch_page(struct drm_device *dev) |
{ |
1265,14 → 1752,27 |
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; |
if (bdw_gmch_ctl) |
bdw_gmch_ctl = 1 << bdw_gmch_ctl; |
if (bdw_gmch_ctl > 4) { |
WARN_ON(!i915_preliminary_hw_support); |
return 4<<20; |
} |
|
#ifdef CONFIG_X86_32 |
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ |
if (bdw_gmch_ctl > 4) |
bdw_gmch_ctl = 4; |
#endif |
|
return bdw_gmch_ctl << 20; |
} |
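/* |
 * Decoded, GGMS is a power-of-two size: a raw field value n > 0 selects |
 * (1 << n) MB of GTT PTE space, so 1 -> 2MB, 2 -> 4MB, 3 -> 8MB. The |
 * CONFIG_X86_32 clamp above caps this at 4MB of 8-byte PTEs, i.e. a 2GB |
 * GGTT (4 << 20 / 8 * 4096). |
 */ |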
|
static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) |
{ |
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; |
gmch_ctrl &= SNB_GMCH_GGMS_MASK; |
|
if (gmch_ctrl) |
return 1 << (20 + gmch_ctrl); |
|
return 0; |
} |
|
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
{ |
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
1287,6 → 1787,24 |
return bdw_gmch_ctl << 25; /* 32 MB units */ |
} |
|
static size_t chv_get_stolen_size(u16 gmch_ctrl) |
{ |
gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; |
gmch_ctrl &= SNB_GMCH_GMS_MASK; |
|
/* |
* 0x0 to 0x10: 32MB increments starting at 0MB |
* 0x11 to 0x16: 4MB increments starting at 8MB |
* 0x17 to 0x1d: 4MB increments starting at 36MB |
*/ |
if (gmch_ctrl < 0x11) |
return gmch_ctrl << 25; |
else if (gmch_ctrl < 0x17) |
return (gmch_ctrl - 0x11 + 2) << 22; |
else |
return (gmch_ctrl - 0x17 + 9) << 22; |
} |
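/* |
 * Worked decode of the three ranges above: gmch_ctrl = 0x10 -> 0x10 << 25 |
 * = 512MB; 0x11 -> (0x11 - 0x11 + 2) << 22 = 8MB; 0x17 -> |
 * (0x17 - 0x17 + 9) << 22 = 36MB - matching "4MB increments starting at |
 * 8MB" and "starting at 36MB" in the comment. |
 */ |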
|
static int ggtt_probe_common(struct drm_device *dev, |
size_t gtt_size) |
{ |
1317,19 → 1835,8 |
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability |
* bits. When using advanced contexts each context stores its own PAT, but |
* writing this data shouldn't be harmful even in those cases. */ |
static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv) |
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) |
{ |
#define GEN8_PPAT_UC (0<<0) |
#define GEN8_PPAT_WC (1<<0) |
#define GEN8_PPAT_WT (2<<0) |
#define GEN8_PPAT_WB (3<<0) |
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2) |
/* FIXME(BDW): Bspec is completely confused about cache control bits. */ |
#define GEN8_PPAT_LLC (1<<2) |
#define GEN8_PPAT_LLCELLC (2<<2) |
#define GEN8_PPAT_LLCeLLC (3<<2) |
#define GEN8_PPAT_AGE(x) (x<<4) |
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) |
uint64_t pat; |
|
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ |
1347,6 → 1854,33 |
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); |
} |
|
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) |
{ |
uint64_t pat; |
|
/* |
* Map WB on BDW to snooped on CHV. |
* |
* Only the snoop bit has meaning for CHV; the rest is |
* ignored. |
* |
* Note that the hardware enforces snooping for all page |
* table accesses. The snoop bit is actually ignored for |
* PDEs. |
*/ |
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | |
GEN8_PPAT(1, 0) | |
GEN8_PPAT(2, 0) | |
GEN8_PPAT(3, 0) | |
GEN8_PPAT(4, CHV_PPAT_SNOOP) | |
GEN8_PPAT(5, CHV_PPAT_SNOOP) | |
GEN8_PPAT(6, CHV_PPAT_SNOOP) | |
GEN8_PPAT(7, CHV_PPAT_SNOOP); |
|
I915_WRITE(GEN8_PRIVATE_PAT, pat); |
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); |
} |
|
static int gen8_gmch_probe(struct drm_device *dev, |
size_t *gtt_total, |
size_t *stolen, |
1367,12 → 1901,20 |
|
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
|
if (IS_CHERRYVIEW(dev)) { |
*stolen = chv_get_stolen_size(snb_gmch_ctl); |
gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); |
} else { |
*stolen = gen8_get_stolen_size(snb_gmch_ctl); |
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); |
} |
|
gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); |
*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; |
|
gen8_setup_private_ppat(dev_priv); |
if (IS_CHERRYVIEW(dev)) |
chv_setup_private_ppat(dev_priv); |
else |
bdw_setup_private_ppat(dev_priv); |
|
ret = ggtt_probe_common(dev, gtt_size); |
|
1426,6 → 1968,11 |
{ |
|
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); |
|
if (drm_mm_initialized(&vm->mm)) { |
drm_mm_takedown(&vm->mm); |
list_del(&vm->global_link); |
} |
iounmap(gtt->gsm); |
teardown_scratch_page(vm->dev); |
} |
1449,7 → 1996,6 |
|
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); |
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; |
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; |
|
if (unlikely(dev_priv->gtt.do_idle_maps)) |
DRM_INFO("applying Ironlake quirks for intel_iommu\n"); |
1501,10 → 2047,81 |
gtt->base.total >> 20); |
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); |
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); |
#ifdef CONFIG_INTEL_IOMMU |
if (intel_iommu_gfx_mapped) |
DRM_INFO("VT-d active for gfx access\n"); |
#endif |
/* |
* i915.enable_ppgtt is read-only, so do an early pass to validate the |
* user's requested state against the hardware/driver capabilities. We |
* do this now so that we can print out any log messages once rather |
* than every time we check intel_enable_ppgtt(). |
*/ |
i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); |
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); |
|
return 0; |
} |
|
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
if (vma == NULL) |
return ERR_PTR(-ENOMEM); |
|
INIT_LIST_HEAD(&vma->vma_link); |
INIT_LIST_HEAD(&vma->mm_list); |
INIT_LIST_HEAD(&vma->exec_list); |
vma->vm = vm; |
vma->obj = obj; |
|
switch (INTEL_INFO(vm->dev)->gen) { |
case 8: |
case 7: |
case 6: |
if (i915_is_ggtt(vm)) { |
vma->unbind_vma = ggtt_unbind_vma; |
vma->bind_vma = ggtt_bind_vma; |
} else { |
vma->unbind_vma = ppgtt_unbind_vma; |
vma->bind_vma = ppgtt_bind_vma; |
} |
break; |
case 5: |
case 4: |
case 3: |
case 2: |
BUG_ON(!i915_is_ggtt(vm)); |
vma->unbind_vma = i915_ggtt_unbind_vma; |
vma->bind_vma = i915_ggtt_bind_vma; |
break; |
default: |
BUG(); |
} |
|
/* Keep GGTT vmas first to make debug easier */ |
if (i915_is_ggtt(vm)) |
list_add(&vma->vma_link, &obj->vma_list); |
else |
list_add_tail(&vma->vma_link, &obj->vma_list); |
|
return vma; |
} |
|
struct i915_vma * |
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma; |
|
vma = i915_gem_obj_to_vma(obj, vm); |
if (!vma) |
vma = __i915_gem_vma_create(obj, vm); |
|
return vma; |
} |
|
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
if (sg_is_last(sg)) |
1519,7 → 2136,7 |
|
|
void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
sg_free_fn *free_fn) |
bool skip_first_chunk, sg_free_fn *free_fn) |
{ |
struct scatterlist *sgl, *next; |
|
1547,16 → 2164,18 |
} |
|
table->orig_nents -= sg_size; |
if (!skip_first_chunk) |
kfree(sgl); |
skip_first_chunk = false; |
sgl = next; |
} |
|
table->sgl = NULL; |
} |
|
void sg_free_table(struct sg_table *table) |
{ |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL); |
} |
|
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) |
1622,7 → 2241,7 |
return 0; |
|
err: |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, NULL); |
|
return -ENOMEM; |
} |