Subversion Repositories Kolibri OS

Compare Revisions

Rev 6936 → Rev 6937

/drivers/video/drm/i915/i915_gem_gtt.c
104,9 → 104,11
{
bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
 
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 
if (intel_vgpu_active(dev))
has_full_ppgtt = false; /* emulation is too hard */
125,6 → 127,9
if (enable_ppgtt == 2 && has_full_ppgtt)
return 2;
 
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
134,14 → 139,13
#endif
 
/* Early VLV doesn't have this */
if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
dev->pdev->revision < 0xb) {
if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
return 0;
}
 
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2;
return has_full_48bit_ppgtt ? 3 : 2;
else
return has_aliasing_ppgtt ? 1 : 0;
}
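
Note on the return contract of sanitize_enable_ppgtt() after this change: 0 disables PPGTT, 1 selects aliasing PPGTT, 2 full 32-bit PPGTT, and the newly added 3 full 48-bit (4-level) PPGTT on Broadwell and gen9+. A minimal sketch of how the sanitized value is consumed elsewhere, assuming the USES_* macros of the upstream kernel this port tracks:

/* Sketch, assuming upstream i915_drv.h of this era. */
#define USES_PPGTT(dev)			(i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev)		(i915.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev)	(i915.enable_ppgtt == 3)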
654,10 → 658,10
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring);
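
This hunk, like the PP_DIR_DCLV/PP_DIR_BASE hunks further down (1655 and 1692), switches register writes into the ring from raw intel_ring_emit() to intel_ring_emit_reg(), part of the i915_reg_t type-safety rework: register offsets are now a distinct type and must be unwrapped before being emitted. A sketch of the helper, assuming the upstream intel_ringbuffer.h definition of this era:

static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
				       i915_reg_t reg)
{
	/* Unwrap the typed register before placing it in the ring. */
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}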
 
757,10 → 761,10
gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
scratch_pte);
} else {
uint64_t templ4, pml4e;
uint64_t pml4e;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
scratch_pte);
}
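
All of the iterator hunks in this file drop the caller-supplied temp argument: the gen8_for_each_{pde,pdpe,pml4e} macros were reworked so the clamping temporary lives inside the macro itself. A sketch of the reworked form, following the upstream macro this change mirrors (the pdpe and pml4e variants follow the same pattern):

#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     /* advance to the next PDE boundary, clamped to length */	\
	     ({ u64 temp = ALIGN(start + 1, 1 << GEN8_PDE_SHIFT);	\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)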
826,10 → 830,10
cache_level);
} else {
struct i915_page_directory_pointer *pdp;
uint64_t templ4, pml4e;
uint64_t pml4e;
uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
 
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
start, cache_level);
}
897,14 → 901,13
enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int offset = vgtif_reg(pdp0_lo);
int i;
 
if (USES_FULL_48BIT_PPGTT(dev)) {
u64 daddr = px_dma(&ppgtt->pml4);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
912,10 → 915,8
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
I915_WRITE(offset, lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr));
 
offset += 8;
I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
}
 
msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
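
Here the paravirtual page-directory handshake stops computing raw byte offsets into the vGPU PV info page and instead names the per-PDP fields. A sketch of the layout this relies on, assuming the upstream i915_vgpu.h of this era (vgtif_reg() maps a struct vgt_if field to its MMIO offset inside VGT_PVINFO_PAGE; the offsetof form below is an equivalent sketch of the upstream idiom):

struct vgt_if {
	/* ... */
	struct {
		u32 lo;
		u32 hi;
	} pdp[4];	/* pdp0..pdp3, each a 64-bit address split in two */
	/* ... */
};

#define vgtif_reg(x) \
	_MMIO(VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))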
1010,10 → 1011,9
{
struct drm_device *dev = vm->dev;
struct i915_page_table *pt;
uint64_t temp;
uint32_t pde;
 
gen8_for_each_pde(pt, pd, start, length, temp, pde) {
gen8_for_each_pde(pt, pd, start, length, pde) {
/* Don't reallocate page tables */
if (test_bit(pde, pd->used_pdes)) {
/* Scratch is never allocated this way */
1072,13 → 1072,12
{
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 
WARN_ON(!bitmap_empty(new_pds, pdpes));
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (test_bit(pdpe, pdp->used_pdpes))
continue;
 
1126,12 → 1125,11
{
struct drm_device *dev = vm->dev;
struct i915_page_directory_pointer *pdp;
uint64_t temp;
uint32_t pml4e;
 
WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
pdp = alloc_pdp(dev);
if (IS_ERR(pdp))
1215,7 → 1213,6
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
uint64_t temp;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev);
int ret;
1242,7 → 1239,7
}
 
/* For every page directory referenced, allocate page tables */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
if (ret)
1254,7 → 1251,7
 
/* Allocations have completed successfully, so set the bitmaps, and do
* the mappings. */
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
gen8_pde_t *const page_directory = kmap_px(pd);
struct i915_page_table *pt;
uint64_t pd_len = length;
1264,7 → 1261,7
/* Every pd should be allocated, we just did that above. */
WARN_ON(!pd);
 
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
/* Same reasoning as pd */
WARN_ON(!pt);
WARN_ON(!pd_len);
1301,6 → 1298,8
 
err_out:
while (pdpe--) {
unsigned long temp;
 
for_each_set_bit(temp, new_page_tables + pdpe *
BITS_TO_LONGS(I915_PDES), I915_PDES)
free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1323,7 → 1322,7
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
struct i915_page_directory_pointer *pdp;
uint64_t temp, pml4e;
uint64_t pml4e;
int ret = 0;
 
/* Do the pml4 allocations first, so we don't need to track the newly
1342,7 → 1341,7
"The allocation has spanned more than 512GB. "
"It is highly likely this is incorrect.");
 
gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
WARN_ON(!pdp);
 
ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1382,10 → 1381,9
struct seq_file *m)
{
struct i915_page_directory *pd;
uint64_t temp;
uint32_t pdpe;
 
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
struct i915_page_table *pt;
uint64_t pd_len = length;
uint64_t pd_start = start;
1395,7 → 1393,7
continue;
 
seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
uint32_t pte;
gen8_pte_t *pt_vaddr;
 
1445,11 → 1443,11
if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t templ4, pml4e;
uint64_t pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
 
gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es))
continue;
 
1655,9 → 1653,9
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
1692,9 → 1690,9
return ret;
 
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
2345,7 → 2343,10
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
(sg_iter.sg_pgoffset << PAGE_SHIFT);
2371,8 → 2372,36
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
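
The rpm_atomic_seq bracketing added throughout these GGTT paths is a debug check that the device cannot be runtime suspended while PTEs are being written. A sketch of the helpers, assuming the upstream intel_drv.h definitions this port tracks:

static inline int
assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
{
	/* Snapshot the atomic-section sequence counter... */
	int seq = atomic_read(&dev_priv->pm.atomic_seq);

	assert_rpm_wakelock_held(dev_priv);
	return seq;
}

static inline void
assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
{
	/* ...and warn if runtime PM transitioned underneath us. */
	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
		  "HW access outside of RPM atomic section\n");
}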
 
struct insert_entries {
struct i915_address_space *vm;
struct sg_table *st;
uint64_t start;
enum i915_cache_level level;
u32 flags;
};
 
static int gen8_ggtt_insert_entries__cb(void *_arg)
{
struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->st,
arg->start, arg->level, arg->flags);
return 0;
}
 
static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
enum i915_cache_level level,
u32 flags)
{
struct insert_entries arg = { vm, st, start, level, flags };
stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
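
The __BKL wrapper exists because, per the upstream change this mirrors, updating GGTT PTEs on one CPU while another CPU reads through the GGTT can return corrupt data on Cherryview/Braswell, so the update is serialized across all CPUs with stop_machine(). Its prototype, from linux/stop_machine.h:

/* Runs fn(data) on one CPU with every other CPU spinning. */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);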
 
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
2391,7 → 2420,10
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
2415,6 → 2447,8
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2429,7 → 2463,10
(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
2441,6 → 2478,8
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
readl(gtt_base);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2455,7 → 2494,10
(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
2467,6 → 2509,8
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2474,11 → 2518,17
uint64_t start,
enum i915_cache_level cache_level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static void i915_ggtt_clear_range(struct i915_address_space *vm,
2486,9 → 2536,16
uint64_t length,
bool unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
int rpm_atomic_seq;
 
rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
intel_gtt_clear_range(first_entry, num_entries);
 
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
 
static int ggtt_bind_vma(struct i915_vma *vma,
2740,7 → 2797,6
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
ppgtt->base.cleanup(&ppgtt->base);
kfree(ppgtt);
}
 
if (drm_mm_initialized(&vm->mm)) {
2990,6 → 3046,9
dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
if (IS_CHERRYVIEW(dev_priv))
dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
return ret;
}
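
This is where the Cherryview workaround is wired up: the probe path replaces the default insert_entries hook with the gen8_ggtt_insert_entries__BKL wrapper defined above, so only the affected platform pays the stop_machine() cost.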
 
3298,7 → 3357,7
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj)
{
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
3530,7 → 3589,7
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->rotation_info.size;
return view->params.rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
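
These last hunks follow rotation_info as it moves into the params union of the GGTT view descriptor. A sketch of the shape implied by the accessors in this diff (field names beyond partial and rotation_info are assumptions from the upstream struct of this era):

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotation_info;
	} params;

	struct sg_table *pages;
};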