Subversion Repositories Kolibri OS


Rev 6660 → Rev 6937
Line 102... Line 102...
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
 	bool has_aliasing_ppgtt;
 	bool has_full_ppgtt;
+	bool has_full_48bit_ppgtt;
 
 	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
 	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
+	has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
 
Line 123... Line 125...
 		return 1;
 
 	if (enable_ppgtt == 2 && has_full_ppgtt)
 		return 2;
 
+	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
+		return 3;
+
 #ifdef CONFIG_INTEL_IOMMU
 	/* Disable ppgtt on SNB if VT-d is on. */
 	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
 		DRM_INFO("Disabling PPGTT because VT-d is on\n");
 		return 0;
 	}
 #endif
 
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-	    dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
-		return 2;
+		return has_full_48bit_ppgtt ? 3 : 2;
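The net effect of this hunk: the enable_ppgtt parameter gains a third level, and execlists-capable gen8+ hardware now picks the deepest supported mode instead of always returning 2. Condensed into a standalone sketch (hypothetical helper, simplified to just the branches visible above):

#include <stdbool.h>

/* Sketch only: the r6937 decision ladder from the hunk above, condensed.
 * Return: 1 = aliasing PPGTT, 2 = full PPGTT, 3 = full 48-bit PPGTT. */
static int ppgtt_mode_sketch(int enable_ppgtt, bool has_full_ppgtt,
			     bool has_full_48bit_ppgtt, bool gen8_execlists)
{
	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;
	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;			/* new in this revision */
	if (gen8_execlists)
		return has_full_48bit_ppgtt ? 3 : 2;
	return 1;				/* simplified fallback */
}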
Line 652... Line 656...
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
 	intel_ring_emit(ring, upper_32_bits(addr));
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
 	intel_ring_emit(ring, lower_32_bits(addr));
 	intel_ring_advance(ring);
 
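intel_ring_emit_reg() is the companion of the i915_reg_t conversion in this kernel generation: register offsets become a typed wrapper rather than a bare u32, so register writes into the ring need a helper that unwraps the offset explicitly. Roughly (sketch; the in-tree definition may differ in detail):

/* Sketch of the typed-register idea behind intel_ring_emit_reg(). */
typedef struct {
	uint32_t reg;			/* raw MMIO offset */
} i915_reg_t;

#define i915_mmio_reg_offset(r) ((r).reg)

static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
				       i915_reg_t reg)
{
	/* same DWORD write as intel_ring_emit(), minus the implicit cast */
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}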
Line 755... Line 759...
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
 					   scratch_pte);
 	} else {
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		struct i915_page_directory_pointer *pdp;
 
-		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
 						   scratch_pte);
 		}
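This hunk sets the pattern for most of the remaining hunks: the gen8_for_each_{pde,pdpe,pml4e} walkers drop their caller-supplied temp argument. A macro can keep the step computation internal with a statement expression, along these lines (sketch only, assuming GCC statement expressions; the in-tree macro may differ in detail):

/* Sketch: a page-table walker that no longer needs a caller-declared
 * scratch variable. Index/shift helpers are assumed to exist upstream. */
#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for ((iter) = gen8_pml4e_index(start);				\
	     (length) > 0 && (iter) < GEN8_PML4ES_PER_PML4 &&		\
		((pdp) = (pml4)->pdps[iter], true);			\
	     ({ u64 t = ALIGN((start) + 1, 1ULL << GEN8_PML4E_SHIFT);	\
		t = min(t - (start), (length));				\
		(start) += t; (length) -= t; }), ++(iter))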
Line 824... Line 828...
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
 					      cache_level);
 	} else {
 		struct i915_page_directory_pointer *pdp;
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
 
-		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
 						      start, cache_level);
 		}
 	}
Line 895... Line 899...
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
 	enum vgt_g2v_type msg;
 	struct drm_device *dev = ppgtt->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int offset = vgtif_reg(pdp0_lo);
 	int i;
 
 	if (USES_FULL_48BIT_PPGTT(dev)) {
 		u64 daddr = px_dma(&ppgtt->pml4);
 
-		I915_WRITE(offset, lower_32_bits(daddr));
-		I915_WRITE(offset + 4, upper_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
 
 		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
 				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
 	} else {
 		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
 			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-			I915_WRITE(offset, lower_32_bits(daddr));
-			I915_WRITE(offset + 4, upper_32_bits(daddr));
-
-			offset += 8;
+			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
+			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
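The guest-to-host notification no longer computes raw byte offsets into the vGPU PV info page by hand; vgtif_reg() now indexes a structured pdp[] array, so the manual offset += 8 bookkeeping disappears. The layout implied by the new accessors is roughly as follows (sketch; only pdp[] is taken from this diff, everything else is an assumption):

/* Sketch of the PV info page shape implied by vgtif_reg(pdp[i].lo/hi):
 * four 64-bit page-directory pointers stored as lo/hi 32-bit pairs. */
struct vgt_if_sketch {
	/* ... other PV info fields, not shown in this diff ... */
	struct {
		uint32_t lo;
		uint32_t hi;
	} pdp[4];			/* GEN8_LEGACY_PDPES == 4 */
};

/* vgtif_reg(field) then resolves to the MMIO offset of that field
 * inside the PV info page, e.g. vgtif_reg(pdp[i].lo). */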
Line 1008... Line 1009...
 				     uint64_t length,
 				     unsigned long *new_pts)
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_table *pt;
-	uint64_t temp;
 	uint32_t pde;
 
-	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+	gen8_for_each_pde(pt, pd, start, length, pde) {
 		/* Don't reallocate page tables */
 		if (test_bit(pde, pd->used_pdes)) {
 			/* Scratch is never allocated this way */
 			WARN_ON(pt == vm->scratch_pt);
Line 1070... Line 1070...
 				  uint64_t length,
 				  unsigned long *new_pds)
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory *pd;
-	uint64_t temp;
 	uint32_t pdpe;
 	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 
 	WARN_ON(!bitmap_empty(new_pds, pdpes));
 
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (test_bit(pdpe, pdp->used_pdpes))
 			continue;
Line 1124... Line 1123...
 				  uint64_t length,
 				  unsigned long *new_pdps)
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory_pointer *pdp;
-	uint64_t temp;
 	uint32_t pml4e;
 
 	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
 
-	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (!test_bit(pml4e, pml4->used_pml4es)) {
 			pdp = alloc_pdp(dev);
Line 1213... Line 1211...
 	unsigned long *new_page_dirs, *new_page_tables;
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory *pd;
 	const uint64_t orig_start = start;
 	const uint64_t orig_length = length;
-	uint64_t temp;
 	uint32_t pdpe;
 	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 	int ret;
 
Line 1240... Line 1237...
 		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
 		return ret;
 	}
 
 	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
 						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
 		if (ret)
 			goto err_out;
Line 1252... Line 1249...
 	start = orig_start;
 	length = orig_length;
 
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
 		uint64_t pd_start = start;
 		uint32_t pde;
 
 		/* Every pd should be allocated, we just did that above. */
 		WARN_ON(!pd);
 
-		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
 			/* Same reasoning as pd */
Line 1299... Line 1296...
 	mark_tlbs_dirty(ppgtt);
 	return 0;
 
 err_out:
 	while (pdpe--) {
+		unsigned long temp;
+
 		for_each_set_bit(temp, new_page_tables + pdpe *
 				BITS_TO_LONGS(I915_PDES), I915_PDES)
 			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
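Note why the err_out path now declares its own unsigned long temp: the old function-wide uint64_t temp is gone, and the bitmap iterators genuinely want unsigned long, because the kernel's bit-search primitives operate on unsigned long words. The relevant shape (standard kernel definitions, shown for context):

/* The bitmap search primitives underlying for_each_set_bit() take and
 * return unsigned long, which is why the cleanup loop needs its own
 * correctly typed index rather than reusing a uint64_t. */
unsigned long find_first_bit(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset);

#define for_each_set_bit(bit, addr, size)			\
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))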
Line 1321... Line 1320...
 {
 	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
 	struct i915_hw_ppgtt *ppgtt =
 			container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_directory_pointer *pdp;
-	uint64_t temp, pml4e;
+	uint64_t pml4e;
 	int ret = 0;
 
 	/* Do the pml4 allocations first, so we don't need to track the newly
 	 * allocated tables below the pdp */
Line 1340... Line 1339...
 
 	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
 	     "The allocation has spanned more than 512GB. "
 	     "It is highly likely this is incorrect.");
 
-	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		WARN_ON(!pdp);
 
Line 1380... Line 1379...
 			  uint64_t start, uint64_t length,
 			  gen8_pte_t scratch_pte,
 			  struct seq_file *m)
 {
 	struct i915_page_directory *pd;
-	uint64_t temp;
 	uint32_t pdpe;
 
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
 		uint64_t pd_start = start;
 		uint32_t pde;
 
 		if (!test_bit(pdpe, pdp->used_pdpes))
 			continue;
 
 		seq_printf(m, "\tPDPE #%d\n", pdpe);
-		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
 			uint32_t  pte;
Line 1443... Line 1441...
 						 I915_CACHE_LLC, true);
 
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
 	} else {
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		struct i915_pml4 *pml4 = &ppgtt->pml4;
 		struct i915_page_directory_pointer *pdp;
 
-		gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 			if (!test_bit(pml4e, pml4->used_pml4es))
 				continue;
Line 1653... Line 1651...
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
Line 1690... Line 1688...
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
 	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
 	intel_ring_emit(ring, get_pd_offset(ppgtt));
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
Line 2343... Line 2341...
 	gen8_pte_t __iomem *gtt_entries =
 		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0; /* shut up gcc */
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_dma_address(sg_iter.sg) +
 			(sg_iter.sg_pgoffset << PAGE_SHIFT);
Line 2369... Line 2370...
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+}
+
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	uint64_t start;
+	enum i915_cache_level level;
+	u32 flags;
+};
+
+static int gen8_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+	gen8_ggtt_insert_entries(arg->vm, arg->st,
+				 arg->start, arg->level, arg->flags);
+	return 0;
+}
+
+static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					  struct sg_table *st,
+					  uint64_t start,
+					  enum i915_cache_level level,
+					  u32 flags)
+{
+	struct insert_entries arg = { vm, st, start, level, flags };
+	/* upstream wraps this call in stop_machine(); invoked directly here,
+	 * since a bare "callback, &arg;" comma expression would never call it */
+	gen8_ggtt_insert_entries__cb(&arg);
 }
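The __BKL suffix marks the Cherryview workaround wired up later in this diff: GGTT PTE writes can race with concurrent accesses through the GGTT, so upstream Linux pushes the whole insert through stop_machine(), which runs the callback with every other CPU parked. The upstream shape is roughly the following (sketch; stop_machine() appears unavailable in this port, hence the direct call above):

#include <linux/stop_machine.h>

/* Upstream-style serialization (sketch): run the PTE update while all
 * other CPUs are held in a known-safe spot, so nothing reads through
 * the GGTT mid-update. */
static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };

	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}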
Line 2375... Line 2404...
 
 /*
  * Binds an object into the global gtt with the specified cache level. The object
Line 2389... Line 2418...
 	gen6_pte_t __iomem *gtt_entries =
 		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
 		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
Line 2413... Line 2445...
 	 * want to flush the TLBs only after we're certain all the PTE updates
 	 * have finished.
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
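Each of these GGTT paths now brackets its hardware access with assert_rpm_atomic_begin/end. The pair is a debug check, not a lock: begin samples a sequence number that the runtime-PM suspend path bumps, and end warns if it changed inside the section. A minimal sketch of that discipline (assuming an atomic counter named pm.atomic_seq; the exact upstream fields may differ):

/* Sketch: runtime-PM "atomic section" assertions. The suspend path is
 * assumed to increment dev_priv->pm.atomic_seq; names are illustrative. */
static inline int assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
{
	return atomic_read(&dev_priv->pm.atomic_seq);
}

static inline void assert_rpm_atomic_end(struct drm_i915_private *dev_priv,
					 int begin_seq)
{
	/* if the device suspended/resumed mid-section, complain once */
	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
		  "HW access outside of RPM atomic section\n");
}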
Line 2419... Line 2453...
 
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
Line 2427... Line 2461...
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen8_pte_t scratch_pte, __iomem *gtt_base =
 		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
 	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
 	int i;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
 		 first_entry, num_entries, max_entries))
Line 2439... Line 2476...
 				      I915_CACHE_LLC,
 				      use_scratch);
 	for (i = 0; i < num_entries; i++)
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 	readl(gtt_base);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
Line 2445... Line 2484...
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
Line 2453... Line 2492...
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen6_pte_t scratch_pte, __iomem *gtt_base =
 		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
 	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
 	int i;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
 		 first_entry, num_entries, max_entries))
Line 2465... Line 2507...
 				     I915_CACHE_LLC, use_scratch, 0);
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     struct sg_table *pages,
 				     uint64_t start,
 				     enum i915_cache_level cache_level, u32 unused)
 {
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
 
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
 				  uint64_t length,
 				  bool unused)
 {
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
 	intel_gtt_clear_range(first_entry, num_entries);
+
Line 2738... Line 2795...
 
 	if (dev_priv->mm.aliasing_ppgtt) {
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
-		kfree(ppgtt);
 	}
 
Line 2988... Line 3044...
 	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
 	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
 	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
 	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+
+	if (IS_CHERRYVIEW(dev_priv))
+		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
 	return ret;
 }
 
Line 3296... Line 3355...
 
 static struct sg_table *
 intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
 			  struct drm_i915_gem_object *obj)
 {
-	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
+	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
 	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
 	unsigned int size_pages_uv;
 	struct sg_page_iter sg_iter;
 	unsigned long i;
Line 3528... Line 3587...
 		    const struct i915_ggtt_view *view)
 {
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->rotation_info.size;
+		return view->params.rotation_info.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
 		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
 		return obj->base.size;
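The last two hunks are a single rename: rotation parameters move under the params union that already held the partial-view parameters, so every view type keeps its per-type data in one place. The layout implied by the accessors in this diff is roughly (sketch; fields beyond rotation_info and partial are assumptions):

/* Sketch of the i915_ggtt_view shape implied by the new accessors:
 * per-type parameters share one union. */
struct i915_ggtt_view_sketch {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotation_info;
	} params;
};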