Subversion Repositories: Kolibri OS


Diff between Rev 6937 (old; "-" lines) and Rev 7144 (new; "+" lines):
@@ -93 +93 @@
  */
 
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+	.type = I915_GGTT_VIEW_NORMAL,
+};
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
-        .type = I915_GGTT_VIEW_ROTATED
+	.type = I915_GGTT_VIEW_ROTATED,
 };
 
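Note on the hunk above: Rev 7144 (tracking the upstream Linux 4.5 i915 driver) gives i915_ggtt_view_normal an explicit designated initializer instead of relying on static zero-initialization. A minimal sketch of why both forms describe the same object, assuming the enum layout from the i915 headers where I915_GGTT_VIEW_NORMAL is the first, zero-valued enumerator:

/* Sketch only; struct contents abbreviated from the real i915_ggtt_view. */
enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,	/* assumed: first enumerator, value 0 */
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct ggtt_view_sketch {
	enum i915_ggtt_view_type type;
};

/* A const global with no initializer is zero-initialized, so .type ends
 * up as 0 == I915_GGTT_VIEW_NORMAL anyway ... */
const struct ggtt_view_sketch implicit_normal;

/* ... but the new revision spells it out, which survives enum reordering. */
const struct ggtt_view_sketch explicit_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};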
@@ -2120 +2122 @@
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
+
+static void gtt_write_workarounds(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* This function is for gtt related workarounds. This function is
+	 * called on driver load and after a GPU reset, so you can place
+	 * workarounds here even if they get overwritten by GPU reset.
+	 */
+	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+	if (IS_BROADWELL(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+	else if (IS_CHERRYVIEW(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+	else if (IS_SKYLAKE(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+	else if (IS_BROXTON(dev))
+		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
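The new gtt_write_workarounds() implements WaIncreaseDefaultTLBEntries by programming a single register, GEN8_L3_LRA_1_GPGPU, with a per-platform default value. The same dispatch could be written table-driven; a hedged sketch of that alternative (the wa_platform enum and table are illustrative, not part of the driver):

/* Hypothetical table-driven variant of gtt_write_workarounds(). */
enum wa_platform { WA_BDW, WA_CHV, WA_SKL, WA_BXT, WA_NONE };

static const u32 l3_lra_1_default[] = {
	[WA_BDW] = GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW,
	[WA_CHV] = GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV,
	[WA_SKL] = GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL,
	[WA_BXT] = GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT,
};

static void gtt_write_workarounds_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum wa_platform p = WA_NONE;

	if (IS_BROADWELL(dev))		p = WA_BDW;
	else if (IS_CHERRYVIEW(dev))	p = WA_CHV;
	else if (IS_SKYLAKE(dev))	p = WA_SKL;
	else if (IS_BROXTON(dev))	p = WA_BXT;

	if (p != WA_NONE)
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, l3_lra_1_default[p]);
}

dev_priv stays in scope because the I915_WRITE macro expects it there.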
@@ -2136 +2157 @@
 	return ret;
 }
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+	gtt_write_workarounds(dev);
+
 	/* In the case of execlists, PPGTT is enabled by the context descriptor
 	 * and the PDPs are contained within the context itself.  We don't
 	 * need to do anything here. */
 	if (i915.enable_execlists)
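Calling gtt_write_workarounds() at the top of i915_ppgtt_init_hw() is what makes the comment inside that function hold: i915_ppgtt_init_hw() runs both at driver load and after a GPU reset, so the TLB workaround value is reapplied even when a reset wipes it.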
@@ -2725 +2748 @@
 			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
 			return ret;
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
 	}
 
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
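The mm_list to vm_link rename here is one instance of a broader renaming in this revision: a vma is linked into two lists at once, and the old field names did not say which list each link belongs to. The new names pair vm_link with the address space's active/inactive lists and obj_link with the object's vma_list; a sketch of the two-list layout follows the vma-creation hunk further down.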
@@ -2797 +2820 @@
 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
 		ppgtt->base.cleanup(&ppgtt->base);
 	}
 
+	i915_gem_cleanup_stolen(dev);
+
 	if (drm_mm_initialized(&vm->mm)) {
 		if (intel_vgpu_active(dev))
@@ -3014 +3039 @@
 
 	/* TODO: We're not aware of mappable constraints on gen8 yet */
 	*mappable_base = pci_resource_start(dev->pdev, 2);
 	*mappable_end = pci_resource_len(dev->pdev, 2);
 
-	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
-
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
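Both this hunk and the gen6 one below drop the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair (39-bit here, 40-bit for gen6). Upstream Linux 4.5 still performs these calls, so this looks like a KolibriOS-port simplification, presumably because the port's PCI shim has no DMA-mask handling; that reading is an inference from the diff, not something the page states.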
@@ -3073 +3095 @@
 		DRM_ERROR("Unknown GMADR size (%llx)\n",
 			  dev_priv->gtt.mappable_end);
 		return -ENXIO;
 	}
 
-	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
-		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
 
@@ -3163 +3183 @@
 		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
 		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
 	}
 
 	gtt->base.dev = dev;
+	gtt->base.is_ggtt = true;
 
 	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
 			     &gtt->mappable_base, &gtt->mappable_end);
+	if (ret)
+		return ret;
+
+	/*
+	 * Initialise stolen early so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
 	if (ret)
-		return ret;
+		goto out_gtt_cleanup;
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
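Two things land in this hunk: gtt->base.is_ggtt caches the "is this the global GTT?" answer as a flag on the address space, and stolen-memory setup moves here, right after the GTT probe, so that preallocated objects can be reserved for the BIOS-to-KMS transition (per the added comment). A stolen-init failure now unwinds through out_gtt_cleanup instead of leaking the probed GTT; the label itself is added in the next hunk, and a sketch of the idiom follows it.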
@@ -3188 +3217 @@
 	 */
 	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
 	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
 	return 0;
+
+out_gtt_cleanup:
+	gtt->base.cleanup(&dev_priv->gtt.base);
+
+	return ret;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
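The out_gtt_cleanup label completes a standard kernel error-unwind pattern: each failure point releases exactly what was already acquired, in reverse order. A minimal self-contained sketch of the idiom with generic stand-in functions (acquire_a/acquire_b/release_a are illustrative, not driver API):

/* Generic sketch of the goto-unwind idiom used by the function above. */
static int acquire_a(void) { return 0; }	/* e.g. gtt->gtt_probe(...) */
static int acquire_b(void) { return 0; }	/* e.g. i915_gem_init_stolen() */
static void release_a(void) { }			/* e.g. gtt->base.cleanup(...) */

static int init_two_stages(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_b();
	if (ret)
		goto out_release_a;	/* undo only what succeeded */

	return 0;

out_release_a:
	release_a();
	return ret;
}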
@@ -3210 +3244 @@
 
 	/* Cache flush objects bound into GGTT and rebind them. */
 	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
-		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		list_for_each_entry(vma, &obj->vma_list, obj_link) {
 			if (vma->vm != vm)
 				continue;
 
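Same rename at work in the restore path: walking an object's vma_list now uses the obj_link member. Nothing else in the loop changes.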
@@ -3267 +3301 @@
 //	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vma_link);
-	INIT_LIST_HEAD(&vma->mm_list);
+	INIT_LIST_HEAD(&vma->vm_link);
+	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	vma->vm = vm;
 	vma->obj = obj;
+	vma->is_ggtt = i915_is_ggtt(vm);
 
 	if (i915_is_ggtt(vm))
 		vma->ggtt_view = *ggtt_view;
-
-	list_add_tail(&vma->vma_link, &obj->vma_list);
-	if (!i915_is_ggtt(vm))
+	else
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+
+	list_add_tail(&vma->obj_link, &obj->vma_list);
 
 	return vma;
3320
	return vma;
3355
	return vma;
Line 3321... Line 3356...
3321
 
3356
 
Line 3322... Line 3357...
3322
}
3357
}
3323
 
3358
 
3324
static struct scatterlist *
3359
static struct scatterlist *
-
 
3360
rotate_pages(const dma_addr_t *in, unsigned int offset,
3325
rotate_pages(dma_addr_t *in, unsigned int offset,
3361
	     unsigned int width, unsigned int height,
3326
	     unsigned int width, unsigned int height,
3362
	     unsigned int stride,
3327
	     struct sg_table *st, struct scatterlist *sg)
3363
	     struct sg_table *st, struct scatterlist *sg)
3328
{
3364
{
Line 3333... Line 3369...
3333
		st->nents = 0;
3369
		st->nents = 0;
3334
		sg = st->sgl;
3370
		sg = st->sgl;
3335
	}
3371
	}
Line 3336... Line 3372...
3336
 
3372
 
3337
	for (column = 0; column < width; column++) {
3373
	for (column = 0; column < width; column++) {
3338
		src_idx = width * (height - 1) + column;
3374
		src_idx = stride * (height - 1) + column;
3339
		for (row = 0; row < height; row++) {
3375
		for (row = 0; row < height; row++) {
3340
			st->nents++;
3376
			st->nents++;
3341
			/* We don't need the pages, but need to initialize
3377
			/* We don't need the pages, but need to initialize
3342
			 * the entries so the sg list can be happily traversed.
3378
			 * the entries so the sg list can be happily traversed.
3343
			 * The only thing we need are DMA addresses.
3379
			 * The only thing we need are DMA addresses.
3344
			 */
3380
			 */
3345
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
3381
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
3346
			sg_dma_address(sg) = in[offset + src_idx];
3382
			sg_dma_address(sg) = in[offset + src_idx];
3347
			sg_dma_len(sg) = PAGE_SIZE;
3383
			sg_dma_len(sg) = PAGE_SIZE;
3348
			sg = sg_next(sg);
3384
			sg = sg_next(sg);
3349
			src_idx -= width;
3385
			src_idx -= stride;
3350
		}
3386
		}
Line 3351... Line 3387...
3351
	}
3387
	}
3352
 
3388
 
Line 3353... Line 3389...
3353
	return sg;
3389
	return sg;
3354
}
3390
}
3355
 
3391
 
3356
static struct sg_table *
3392
static struct sg_table *
3357
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
-
 
3358
			  struct drm_i915_gem_object *obj)
3393
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3359
{
3394
			  struct drm_i915_gem_object *obj)
3360
	struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
3395
{
3361
	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3396
	unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
3362
	unsigned int size_pages_uv;
3397
	unsigned int size_pages_uv;
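rotate_pages() emits source pages column by column, starting from the bottom row, which is what a 90-degree rotation of a page grid looks like in scan order; the new stride parameter separates the distance between rows in the source array (stride) from the number of columns emitted (width). A small standalone sketch of the same index arithmetic, using plain arrays instead of scatterlists:

#include <stdio.h>

/* Print source indices in rotated order: for each column, walk from the
 * bottom row up.  'stride' is the row pitch of the source page array;
 * 'width' x 'height' is the rectangle being rotated. */
static void rotate_indices(unsigned int width, unsigned int height,
			   unsigned int stride)
{
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			printf("%u ", src_idx);
			src_idx -= stride;
		}
	}
	printf("\n");
}

int main(void)
{
	/* 3 columns x 2 rows inside an allocation 4 pages wide:
	 * prints "4 0 5 1 6 2", i.e. bottom row first in each column. */
	rotate_indices(3, 2, 4);
	return 0;
}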
@@ -3397 +3432 @@
 	}
 
 	/* Rotate the pages. */
 	sg = rotate_pages(page_addr_list, 0,
 		     rot_info->width_pages, rot_info->height_pages,
+		     rot_info->width_pages,
 		     st, NULL);
 
 	/* Append the UV plane if NV12. */
@@ -3412 +3448 @@
 		rot_info->uv_start_page = uv_start_page;
 
 		rotate_pages(page_addr_list, uv_start_page,
 			     rot_info->width_pages_uv,
 			     rot_info->height_pages_uv,
+			     rot_info->width_pages_uv,
 			     st, sg);
 	}
 
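Both call sites pass the plane's own width (width_pages, width_pages_uv) as the new stride argument, so the generated layout is unchanged for now; the extra parameter is presumably groundwork for sources whose row pitch differs from the visible width.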
@@ -3493 +3530 @@
 
 	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 		vma->ggtt_view.pages = vma->obj->pages;
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
 		vma->ggtt_view.pages =
-			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
 	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
 		vma->ggtt_view.pages =
 			intel_partial_pages(&vma->ggtt_view, vma->obj);
 	else
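The params.rotation_info to params.rotated rename (and the narrower intel_rotate_fb_obj_pages() signature, which now takes only the rotation parameters) fits the tagged-union shape of i915_ggtt_view: a type field selects which member of the params union is meaningful, so the member names now mirror the view types. A reduced sketch of the pattern, field layout assumed rather than copied from the headers:

/* Sketch of the tagged union behind i915_ggtt_view; contents abbreviated. */
struct rotated_params { unsigned int size; };
struct partial_params { unsigned long long offset; unsigned int size; };

enum view_type { VIEW_NORMAL, VIEW_ROTATED, VIEW_PARTIAL };

struct view_sketch {
	enum view_type type;			/* tag selecting the member */
	union {
		struct rotated_params rotated;	/* valid for VIEW_ROTATED */
		struct partial_params partial;	/* valid for VIEW_PARTIAL */
	} params;
};

static unsigned long long view_size_sketch(const struct view_sketch *view,
					   unsigned long long obj_size)
{
	switch (view->type) {
	case VIEW_ROTATED:
		return view->params.rotated.size;
	case VIEW_PARTIAL:
		return view->params.partial.size << 12;	/* pages to bytes */
	default:
		return obj_size;	/* NORMAL and unknown views */
	}
}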
@@ -3549 +3586 @@
 
 	if (bind_flags == 0)
 		return 0;
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
-		trace_i915_va_alloc(vma->vm,
-				    vma->node.start,
-				    vma->node.size,
-				    VM_TO_TRACE_NAME(vma->vm));
-
 		/* XXX: i915_vma_pin() will fix this +- hack */
 		vma->pin_count++;
 		ret = vma->vm->allocate_va_range(vma->vm,
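The new revision simply drops the trace_i915_va_alloc() tracepoint (and its VM_TO_TRACE_NAME argument) from the bind path, so VA-range allocation is no longer traced here; nothing in this diff relocates the call.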
@@ -3587 +3619 @@
 		    const struct i915_ggtt_view *view)
 {
 	if (view->type == I915_GGTT_VIEW_NORMAL) {
 		return obj->base.size;
 	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return view->params.rotation_info.size;
+		return view->params.rotated.size;
 	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
 		return view->params.partial.size << PAGE_SHIFT;
 	} else {
 		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
 		return obj->base.size;