@@ -65,9 +65,10 @@
 #define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
         pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
         switch (level) {
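
The new "valid" flag selects whether the encoded PTE carries GEN6_PTE_VALID.
A minimal sketch of the two call patterns this patch relies on, using only
names that appear in the hunks below:

        /* normal binding: the GPU must be able to read through the PTE */
        pte = vm->pte_encode(page_addr, cache_level, true);

        /* suspend path: encode without the valid bit, so a stray GPU access
         * faults loudly instead of silently reading the scratch page */
        pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, false);
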
@@ -86,9 +87,10 @@
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
         pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
         switch (level) {
@@ -112,9 +114,10 @@
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
         pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
         /* Mark the page as writeable. Other platforms don't have a
@@ -129,9 +132,10 @@
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
         pte |= HSW_PTE_ADDR_ENCODE(addr);
 
         if (level != I915_CACHE_NONE)
@@ -141,9 +145,10 @@
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-                                      enum i915_cache_level level)
+                                      enum i915_cache_level level,
+                                      bool valid)
 {
-        gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+        gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
         pte |= HSW_PTE_ADDR_ENCODE(addr);
 
         switch (level) {
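
All five platform encoders (snb, ivb, byt, hsw, iris) now share the same
three-argument signature, so they stay interchangeable behind the common
pte_encode hook. A sketch of the assumed wiring (the assignment site is not
part of this excerpt):

        /* assumed: chosen once at probe time, per platform */
        vm->pte_encode = hsw_pte_encode;        /* or the snb/ivb/byt/iris variant */
        pte = vm->pte_encode(addr, level, valid);
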
@@ -243,7 +248,8 @@
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                    unsigned first_entry,
-                                   unsigned num_entries)
+                                   unsigned num_entries,
+                                   bool use_scratch)
 {
         struct i915_hw_ppgtt *ppgtt =
                 container_of(vm, struct i915_hw_ppgtt, base);
@@ -252,7 +258,7 @@
         unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
         unsigned last_pte, i;
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
         pt_vaddr = AllocKernelSpace(4096);
 
@@ -301,7 +307,7 @@
                 dma_addr_t page_addr;
 
                 page_addr = sg_page_iter_dma_address(&sg_iter);
-                pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+                pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
                 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                         act_pt++;
                         MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
@@ -380,7 +386,7 @@
         }
 
         ppgtt->base.clear_range(&ppgtt->base, 0,
-                                ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+                                ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 
         ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -457,7 +463,8 @@
 {
         ppgtt->base.clear_range(&ppgtt->base,
                                 i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                                obj->base.size >> PAGE_SHIFT);
+                                obj->base.size >> PAGE_SHIFT,
+                                true);
 }
 
 extern int intel_iommu_gfx_mapped;
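
clear_range() grows a matching flag. As this patch uses it, the contract is:

        /* use_scratch == true: point the range at the scratch page, as on a
         * normal unbind; the entries stay valid and readable */
        vm->clear_range(vm, first_entry, num_entries, true);

        /* use_scratch == false: write PTEs with the valid bit clear, so any
         * access in the range raises a ring fault (used around suspend) */
        vm->clear_range(vm, first_entry, num_entries, false);
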
@@ -498,15 +505,65 @@
         dev_priv->mm.interruptible = interruptible;
 }
 
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        struct intel_ring_buffer *ring;
+        int i;
+
+        if (INTEL_INFO(dev)->gen < 6)
+                return;
+
+        for_each_ring(ring, dev_priv, i) {
+                u32 fault_reg;
+                fault_reg = I915_READ(RING_FAULT_REG(ring));
+                if (fault_reg & RING_FAULT_VALID) {
+                        DRM_DEBUG_DRIVER("Unexpected fault\n"
+                                         "\tAddr: 0x%08lx\n"
+                                         "\tAddress space: %s\n"
+                                         "\tSource ID: %d\n"
+                                         "\tType: %d\n",
+                                         fault_reg & PAGE_MASK,
+                                         fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+                                         RING_FAULT_SRCID(fault_reg),
+                                         RING_FAULT_FAULT_TYPE(fault_reg));
+                        I915_WRITE(RING_FAULT_REG(ring),
+                                   fault_reg & ~RING_FAULT_VALID);
+                }
+        }
+        POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        /* Don't bother messing with faults pre GEN6 as we have little
+         * documentation supporting that it's a good idea.
+         */
+        if (INTEL_INFO(dev)->gen < 6)
+                return;
+
+        i915_check_and_clear_faults(dev);
+
+        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                       dev_priv->gtt.base.start / PAGE_SIZE,
+                                       dev_priv->gtt.base.total / PAGE_SIZE,
+                                       false);
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj;
 
+        i915_check_and_clear_faults(dev);
+
         /* First fill our portion of the GTT with scratch pages */
         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                        dev_priv->gtt.base.start / PAGE_SIZE,
-                                       dev_priv->gtt.base.total / PAGE_SIZE);
+                                       dev_priv->gtt.base.total / PAGE_SIZE,
+                                       true);
 
         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                 i915_gem_clflush_object(obj, obj->pin_display);
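
Suspend and restore now pair up: suspend invalidates every GGTT entry
(use_scratch == false), so anything that touches the GTT while we are down
leaves a fault behind; restore first reports and clears those faults, then
rewrites valid scratch PTEs before rebinding objects. A sketch of the
assumed call ordering across a suspend/resume cycle (the PM hooks themselves
are not in this excerpt):

        i915_gem_suspend_gtt_mappings(dev);     /* PTEs encoded invalid */
        /* ... device suspends and later resumes ... */
        i915_gem_restore_gtt_mappings(dev);     /* faults logged, PTEs rebuilt */
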
@@ -549,7 +606,7 @@
 
         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                 addr = sg_page_iter_dma_address(&sg_iter);
-                iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+                iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
                 i++;
         }
 
@@ -561,7 +618,7 @@
          */
         if (i != 0)
                 WARN_ON(readl(&gtt_entries[i-1]) !=
-                        vm->pte_encode(addr, level));
+                        vm->pte_encode(addr, level, true));
 
         /* This next bit makes the above posting read even more important. We
          * want to flush the TLBs only after we're certain all the PTE updates
@@ -573,7 +630,8 @@
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                   unsigned int first_entry,
-                                  unsigned int num_entries)
+                                  unsigned int num_entries,
+                                  bool use_scratch)
 {
         struct drm_i915_private *dev_priv = vm->dev->dev_private;
         gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -586,7 +644,8 @@
                  first_entry, num_entries, max_entries))
                 num_entries = max_entries;
 
-        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+        scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
         for (i = 0; i < num_entries; i++)
                 iowrite32(scratch_pte, &gtt_base[i]);
         readl(gtt_base);
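
Here use_scratch flows straight into pte_encode(), so one write loop serves
both cases; only the encoded value differs:

        /* use_scratch == true  -> GEN6_PTE_VALID | scratch-page address */
        /* use_scratch == false -> scratch-page address only, so any GPU
         *                         access through the entry faults */
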
@@ -607,7 +666,8 @@
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                   unsigned int first_entry,
-                                  unsigned int num_entries)
+                                  unsigned int num_entries,
+                                  bool unused)
 {
         intel_gtt_clear_range(first_entry, num_entries);
 }
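
On the pre-GEN6 GMCH path the flag is accepted but deliberately ignored,
since intel_gtt_clear_range() has no notion of an invalid PTE; this matches
the gen < 6 early return in i915_gem_suspend_gtt_mappings(). A sketch of the
assumed probe-time hook selection elsewhere in this file (not part of this
excerpt):

        if (INTEL_INFO(dev)->gen < 6)
                dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
        else
                dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
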
@@ -635,7 +695,8 @@
 
         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                        entry,
-                                       obj->base.size >> PAGE_SHIFT);
+                                       obj->base.size >> PAGE_SHIFT,
+                                       true);
 
         obj->has_global_gtt_mapping = 0;
 }
@@ -722,11 +783,11 @@
                 const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                               hole_start, hole_end);
-                ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+                ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
         }
 
         /* And finally clear the reserved guard page */
-        ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+        ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
@@ -752,7 +813,6 @@
         gtt_size = dev_priv->gtt.base.total;
         mappable_size = dev_priv->gtt.mappable_end;
 
-#if 0
         if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
                 int ret;
 
@@ -762,7 +822,7 @@
                         gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
                 }
 
-                i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
+                i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 
                 ret = i915_gem_init_aliasing_ppgtt(dev);
                 if (!ret)
@@ -772,9 +832,7 @@
                 drm_mm_takedown(&dev_priv->gtt.base.mm);
                 gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
         }
-#endif
-
-        i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size-LFB_SIZE);
+        i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
 }
 
 static int setup_scratch_page(struct drm_device *dev)