35,10 → 35,12 |
#include "i915_trace.h" |
#include "intel_drv.h" |
|
typedef uint32_t gen6_gtt_pte_t; |
#define GEN6_PPGTT_PD_ENTRIES 512 |
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) |
|
/* PPGTT stuff */ |
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) |
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) |
|
#define GEN6_PDE_VALID (1 << 0) |
/* gen6+ has bit 11-4 for physical addr bit 39-32 */ |
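
/* Worked example (illustration only): for a 40-bit physical address
 * such as 0xAB12345000, addr >> 28 brings bits 39:32 down to bits
 * 11:4 and the 0xff0 mask keeps exactly those eight bits, so
 *
 *   GEN6_GTT_ADDR_ENCODE(0xAB12345000)
 *     = 0xAB12345000 | 0xAB0  ->  low 32 bits 0x12345AB0
 *
 * i.e. PTE bits 31:12 carry the low page-frame bits (0x12345) and
 * bits 11:4 carry the high address bits (0xAB).  HSW_GTT_ADDR_ENCODE
 * is the same trick with a 7-bit field (address bits 38:32).
 */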
48,11 → 50,21 |
#define GEN6_PTE_UNCACHED (1 << 1) |
#define HSW_PTE_UNCACHED (0) |
#define GEN6_PTE_CACHE_LLC (2 << 1) |
#define GEN7_PTE_CACHE_L3_LLC (3 << 1) |
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) |
|
/* Cacheability Control is a 4-bit value. The low three bits are stored in * |
* bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. |
*/ |
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ |
(((bits) & 0x8) << (11 - 3))) |
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) |
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) |
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) |
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) |
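
/* Worked expansion (illustration only): for the eLLC/LLC write-back
 * value 0xb = 0b1011, HSW_CACHEABILITY_CONTROL(0xb) gives
 * ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8) = 0x6 | 0x800 = 0x806,
 * i.e. PTE bits 3:1 = 0b011 and PTE bit 11 = 1, matching the comment
 * above.
 */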
|
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
59,39 → 71,103 |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
|
	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}
|
return pte; |
} |
|
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
|
switch (level) { |
case I915_CACHE_L3_LLC: |
pte |= GEN7_PTE_CACHE_L3_LLC; |
break; |
case I915_CACHE_LLC: |
pte |= GEN6_PTE_CACHE_LLC; |
break; |
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
} |
|
return pte; |
} |
|
#define BYT_PTE_WRITEABLE (1 << 1) |
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) |
|
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
pte |= GEN6_PTE_ADDR_ENCODE(addr); |
|
/* Mark the page as writeable. Other platforms don't have a |
* setting for read-only/writable, so this matches that behavior. |
*/ |
pte |= BYT_PTE_WRITEABLE; |
|
if (level != I915_CACHE_NONE) |
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES; |
|
return pte; |
} |
|
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
pte |= HSW_PTE_ADDR_ENCODE(addr); |
|
if (level != I915_CACHE_NONE) |
pte |= HSW_WB_LLC_AGE3; |
|
return pte; |
} |
|
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, |
enum i915_cache_level level) |
{ |
gen6_gtt_pte_t pte = GEN6_PTE_VALID; |
pte |= HSW_PTE_ADDR_ENCODE(addr); |
|
switch (level) { |
case I915_CACHE_NONE: |
break; |
case I915_CACHE_WT: |
pte |= HSW_WT_ELLC_LLC_AGE0; |
break; |
default: |
pte |= HSW_WB_ELLC_LLC_AGE0; |
break; |
} |
|
return pte; |
} |
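
/* Usage sketch (illustration only; assumes the helpers behave as
 * defined above): callers go through vm->pte_encode, which
 * i915_gem_gtt_init() below points at the per-platform variant.
 * Encoding the page at bus address 0x1000 as write-through on Iris
 * works out to:
 */
#if 0
	gen6_gtt_pte_t pte = iris_pte_encode(0x1000, I915_CACHE_WT);
	/* pte == GEN6_PTE_VALID | 0x1000 | HSW_WT_ELLC_LLC_AGE0
	 *     == 0x1 | 0x1000 | 0xc == 0x100d */
#endif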
|
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; |
gen6_gtt_pte_t __iomem *pd_addr; |
uint32_t pd_entry; |
int i; |
|
WARN_ON(ppgtt->pd_offset & 0x3f); |
pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + |
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
104,7 → 180,20 |
writel(pd_entry, pd_addr + i); |
} |
readl(pd_addr); |
} |
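
/* The loop body elided by the hunk above presumably packs each PDE
 * from the page table's DMA address plus the valid bit, along the
 * lines of (sketch only; GEN6_PDE_ADDR_ENCODE is assumed to mirror
 * GEN6_GTT_ADDR_ENCODE):
 *
 *   pd_entry = GEN6_PDE_ADDR_ENCODE(ppgtt->pt_dma_addr[i]);
 *   pd_entry |= GEN6_PDE_VALID;
 */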
|
static int gen6_ppgtt_enable(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint32_t pd_offset; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
int i; |
|
BUG_ON(ppgtt->pd_offset & 0x3f); |
|
gen6_write_pdes(ppgtt); |
|
pd_offset = ppgtt->pd_offset; |
pd_offset /= 64; /* in cachelines, */ |
pd_offset <<= 16; |
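
/* Worked example (illustration only): a page directory at GTT byte
 * offset 0x80000 gives pd_offset / 64 = 0x2000 cachelines, and the
 * shift by 16 parks that count in the upper half of the 32-bit value
 * programmed into each ring's page-directory base register
 * (PP_DIR_BASE), so the written value is 0x20000000.
 */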
152,18 → 241,18 |
} |
|
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
unsigned first_entry, |
unsigned num_entries) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen6_gtt_pte_t *pt_vaddr, scratch_pte; |
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned last_pte, i; |
|
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); |
|
pt_vaddr = AllocKernelSpace(4096); |
|
188,11 → 277,13 |
FreeKernelSpace(pt_vaddr); |
} |
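
/* Index math (illustration only): with 4 KiB page tables holding
 * 4-byte PTEs, I915_PPGTT_PT_ENTRIES is 4096 / 4 = 1024, so e.g.
 * first_entry = 2500 lands in page table act_pt = 2500 / 1024 = 2
 * at slot first_pte = 2500 % 1024 = 452.
 */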
|
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
gen6_gtt_pte_t *pt_vaddr; |
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
210,8 → 301,7 |
dma_addr_t page_addr; |
|
page_addr = sg_page_iter_dma_address(&sg_iter); |
pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); |
if (++act_pte == I915_PPGTT_PT_ENTRIES) { |
act_pt++; |
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3); |
222,13 → 312,17 |
FreeKernelSpace(pt_vaddr); |
} |
|
static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
{ |
struct i915_hw_ppgtt *ppgtt = |
container_of(vm, struct i915_hw_ppgtt, base); |
int i; |
|
drm_mm_takedown(&ppgtt->base.mm); |
|
if (ppgtt->pt_dma_addr) { |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
pci_unmap_page(ppgtt->base.dev->pdev, |
ppgtt->pt_dma_addr[i], |
4096, PCI_DMA_BIDIRECTIONAL); |
} |
242,7 → 336,7 |
|
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->dev; |
struct drm_device *dev = ppgtt->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned first_pd_entry_in_global_pt; |
int i; |
253,11 → 347,13 |
* now. */ |
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); |
|
ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; |
ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; |
ppgtt->enable = gen6_ppgtt_enable; |
ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; |
ppgtt->base.cleanup = gen6_ppgtt_cleanup; |
ppgtt->base.scratch = dev_priv->gtt.base.scratch; |
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
GFP_KERNEL); |
if (!ppgtt->pt_pages) |
283,7 → 379,7 |
ppgtt->pt_dma_addr[i] = pt_addr; |
} |
|
ppgtt->base.clear_range(&ppgtt->base, 0, |
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
|
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); |
317,8 → 413,7 |
if (!ppgtt) |
return -ENOMEM; |
|
ppgtt->base.dev = dev; |
|
if (INTEL_INFO(dev)->gen < 8) |
ret = gen6_ppgtt_init(ppgtt); |
327,8 → 422,11 |
|
if (ret) |
kfree(ppgtt); |
else { |
dev_priv->mm.aliasing_ppgtt = ppgtt; |
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, |
ppgtt->base.total); |
} |
|
return ret; |
} |
341,7 → 439,7 |
if (!ppgtt) |
return; |
|
ppgtt->base.cleanup(&ppgtt->base); |
dev_priv->mm.aliasing_ppgtt = NULL; |
} |
|
349,8 → 447,8 |
struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
ppgtt->base.insert_entries(&ppgtt->base, obj->pages, |
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
cache_level); |
} |
|
357,8 → 455,8 |
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj) |
{ |
ppgtt->base.clear_range(&ppgtt->base, |
i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, |
obj->base.size >> PAGE_SHIFT); |
} |
|
406,11 → 504,12 |
struct drm_i915_gem_object *obj; |
|
/* First fill our portion of the GTT with scratch pages */ |
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
dev_priv->gtt.base.start / PAGE_SIZE, |
dev_priv->gtt.base.total / PAGE_SIZE); |
|
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
i915_gem_clflush_object(obj, obj->pin_display); |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
} |
|
436,12 → 535,12 |
* within the global GTT as well as accessible by the GPU through the GMADR |
* mapped BAR (dev_priv->mm.gtt->gtt). |
*/ |
static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *st, |
unsigned int first_entry, |
enum i915_cache_level level) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
gen6_gtt_pte_t __iomem *gtt_entries = |
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
int i = 0; |
450,7 → 549,7 |
|
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { |
addr = sg_page_iter_dma_address(&sg_iter); |
		iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++; |
} |
|
461,8 → 560,8 |
* hardware should work, we must keep this posting read for paranoia. |
*/ |
if (i != 0) |
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level));
|
/* This next bit makes the above posting read even more important. We |
* want to flush the TLBs only after we're certain all the PTE updates |
472,11 → 571,11 |
POSTING_READ(GFX_FLSH_CNTL_GEN6); |
} |
|
static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
unsigned int first_entry, |
unsigned int num_entries) |
{ |
struct drm_i915_private *dev_priv = vm->dev->dev_private; |
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = |
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
488,8 → 587,7 |
if (num_entries > max_entries) |
num_entries = max_entries; |
|
scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); |
for (i = 0; i < num_entries; i++) |
		iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base); |
496,7 → 594,7 |
} |
|
|
static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level) |
508,7 → 606,7 |
|
} |
|
static void i915_ggtt_clear_range(struct i915_address_space *vm, |
unsigned int first_entry, |
unsigned int num_entries) |
{ |
521,9 → 619,10 |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; |
|
dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, |
entry, |
cache_level); |
|
obj->has_global_gtt_mapping = 1; |
533,9 → 632,10 |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; |
|
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
entry, |
obj->base.size >> PAGE_SHIFT); |
|
obj->has_global_gtt_mapping = 0; |
587,7 → 687,8 |
* aperture. One page should be enough to keep any prefetching inside |
* of the aperture. |
*/ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; |
struct drm_mm_node *entry; |
struct drm_i915_gem_object *obj; |
unsigned long hole_start, hole_end; |
595,37 → 696,38 |
BUG_ON(mappable_end > end); |
|
/* Subtract the guard page ... */ |
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); |
if (!HAS_LLC(dev)) |
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; |
|
/* Mark any preallocated objects as occupied */ |
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); |
int ret; |
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", |
i915_gem_obj_ggtt_offset(obj), obj->base.size); |
|
WARN_ON(i915_gem_obj_ggtt_bound(obj)); |
ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); |
if (ret) |
DRM_DEBUG_KMS("Reservation failed\n"); |
obj->has_global_gtt_mapping = 1; |
list_add(&vma->vma_link, &obj->vma_list); |
} |
|
dev_priv->gtt.base.start = start; |
dev_priv->gtt.base.total = end - start; |
|
/* Clear any non-preallocated blocks */ |
drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { |
const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; |
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
hole_start, hole_end); |
ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); |
} |
|
/* And finally clear the reserved guard page */ |
ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); |
} |
|
static bool |
648,7 → 750,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned long gtt_size, mappable_size; |
|
gtt_size = dev_priv->gtt.base.total; |
mappable_size = dev_priv->gtt.mappable_end; |
|
#if 0 |
658,10 → 760,10 |
if (INTEL_INFO(dev)->gen <= 7) { |
/* PPGTT pdes are stolen from global gtt ptes, so shrink the |
* aperture accordingly when using aliasing ppgtt. */ |
gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; |
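		/* 512 PDEs each occupy a global GTT PTE slot that would
		 * otherwise map one 4 KiB page, so the aperture loses
		 * GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE = 512 * 4096 = 2 MiB
		 * (worked figure for illustration). */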
} |
|
	i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
|
ret = i915_gem_init_aliasing_ppgtt(dev); |
if (!ret) |
668,12 → 770,12 |
return; |
|
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); |
drm_mm_takedown(&dev_priv->gtt.base.mm); |
gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; |
} |
#endif |
|
	i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
} |
|
static int setup_scratch_page(struct drm_device *dev) |
696,8 → 798,8 |
#else |
dma_addr = page_to_phys(page); |
#endif |
dev_priv->gtt.base.scratch.page = page; |
dev_priv->gtt.base.scratch.addr = dma_addr; |
|
return 0; |
} |
705,11 → 807,13 |
static void teardown_scratch_page(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct page *page = dev_priv->gtt.base.scratch.page; |
|
set_pages_wb(page, 1); |
pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, |
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
put_page(page); |
__free_page(page); |
} |
|
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
772,17 → 876,18 |
if (ret) |
DRM_ERROR("Scratch setup failed\n"); |
|
dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; |
dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; |
|
return ret; |
} |
|
static void gen6_gmch_remove(struct i915_address_space *vm) |
{ |
|
struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); |
iounmap(gtt->gsm); |
teardown_scratch_page(vm->dev); |
} |
|
static int i915_gmch_probe(struct drm_device *dev, |
803,13 → 908,13 |
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); |
|
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); |
dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; |
dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; |
|
return 0; |
} |
|
static void i915_gmch_remove(struct i915_address_space *vm) |
{ |
// intel_gmch_remove(); |
} |
821,27 → 926,35 |
int ret; |
|
if (INTEL_INFO(dev)->gen <= 5) { |
gtt->gtt_probe = i915_gmch_probe; |
gtt->base.cleanup = i915_gmch_remove; |
} else { |
gtt->gtt_probe = gen6_gmch_probe; |
gtt->base.cleanup = gen6_gmch_remove; |
if (IS_HASWELL(dev) && dev_priv->ellc_size) |
gtt->base.pte_encode = iris_pte_encode; |
else if (IS_HASWELL(dev)) |
gtt->base.pte_encode = hsw_pte_encode; |
else if (IS_VALLEYVIEW(dev)) |
gtt->base.pte_encode = byt_pte_encode; |
else if (INTEL_INFO(dev)->gen >= 7) |
gtt->base.pte_encode = ivb_pte_encode; |
else |
gtt->base.pte_encode = snb_pte_encode; |
} |
|
	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
if (ret) |
return ret; |
|
gtt->base.dev = dev; |
|
/* GMADR is the PCI mmio aperture into the global GTT. */ |
DRM_INFO("Memory usable by graphics device = %zdM\n", |
gtt->base.total >> 20); |
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); |
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); |
|
return 0; |
} |