Subversion Repositories Kolibri OS


--- Rev 3764
+++ Rev 4104

@@ -43,95 +43,105 @@
  */
 
 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev_priv->bridge_dev;
+	struct resource *r;
 	u32 base;
 
-	/* On the machines I have tested the Graphics Base of Stolen Memory
-	 * is unreliable, so on those compute the base by subtracting the
-	 * stolen memory from the Top of Low Usable DRAM which is where the
-	 * BIOS places the graphics stolen memory.
-	 *
-	 * On gen2, the layout is slightly different with the Graphics Segment
-	 * immediately following Top of Memory (or Top of Usable DRAM). Note
-	 * it appears that TOUD is only reported by 865g, so we just use the
-	 * top of memory as determined by the e820 probe.
-	 *
-	 * XXX gen2 requires an unavailable symbol and 945gm fails with
-	 * its value of TOLUD.
+	/* Almost universally we can find the Graphics Base of Stolen Memory
+	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
+	 * machines this is also mirrored in the bridge device at different
+	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
+	 * different with the Graphics Segment immediately following Top of
+	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+	 * reported by 865g, so we just use the top of memory as determined
+	 * by the e820 probe.
+	 *
+	 * XXX However gen2 requires an unavailable symbol.
 	 */
 	base = 0;
-	if (INTEL_INFO(dev)->gen >= 6) {
-		/* Read Base Data of Stolen Memory Register (BDSM) directly.
-		 * Note that there is also a MCHBAR miror at 0x1080c0 or
-		 * we could use device 2:0x5c instead.
-		 */
-		pci_read_config_dword(pdev, 0xB0, &base);
-		base &= ~4095; /* lower bits used for locking register */
-	} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
-		/* Read Graphics Base of Stolen Memory directly */
-		pci_read_config_dword(pdev, 0xA4, &base);
+	if (INTEL_INFO(dev)->gen >= 3) {
+		/* Read Graphics Base of Stolen Memory directly */
+		pci_read_config_dword(dev->pdev, 0x5c, &base);
+		base &= ~((1<<20) - 1);
+	} else { /* GEN2 */
 #if 0
-	} else if (IS_GEN3(dev)) {
-		u8 val;
-		/* Stolen is immediately below Top of Low Usable DRAM */
-		pci_read_config_byte(pdev, 0x9c, &val);
-		base = val >> 3 << 27;
-		base -= dev_priv->mm.gtt->stolen_size;
-	} else {
 		/* Stolen is immediately above Top of Memory */
 		base = max_low_pfn_mapped << PAGE_SHIFT;
 #endif
 	}
 
+	if (base == 0)
+		return 0;
+#if 0
+	/* Verify that nothing else uses this physical address. Stolen
+	 * memory should be reserved by the BIOS and hidden from the
+	 * kernel. So if the region is already marked as busy, something
+	 * is seriously wrong.
+	 */
+	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+				    "Graphics Stolen Memory");
+	if (r == NULL) {
+		DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+			  base, base + (uint32_t)dev_priv->gtt.stolen_size);
+		base = 0;
+	}
+#endif
 	return base;
 }
 
 static int i915_setup_compression(struct drm_device *dev, int size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+	int ret;
 
-	/* Try to over-allocate to reduce reallocations and fragmentation */
-	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-					   size <<= 1, 4096, 0);
-	if (!compressed_fb)
-		compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
-						   size >>= 1, 4096, 0);
-	if (compressed_fb)
-		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-	if (!compressed_fb)
-		goto err;
+	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
+	if (!compressed_fb)
+		goto err_llb;
+
+	/* Try to over-allocate to reduce reallocations and fragmentation */
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+	if (ret)
+		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+					 size >>= 1, 4096,
+					 DRM_MM_SEARCH_DEFAULT);
+	if (ret)
+		goto err_llb;
 
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
 	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
 	} else {
-		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
-						    4096, 4096, 0);
-		if (compressed_llb)
-			compressed_llb = drm_mm_get_block(compressed_llb,
-							  4096, 4096);
+		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
 		if (!compressed_llb)
 			goto err_fb;
 
-		dev_priv->compressed_llb = compressed_llb;
+		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
+		if (ret)
+			goto err_fb;
+
+		dev_priv->fbc.compressed_llb = compressed_llb;
 
 		I915_WRITE(FBC_CFB_BASE,
 			   dev_priv->mm.stolen_base + compressed_fb->start);
 		I915_WRITE(FBC_LL_BASE,
 			   dev_priv->mm.stolen_base + compressed_llb->start);
 	}
 
-	dev_priv->compressed_fb = compressed_fb;
-	dev_priv->cfb_size = size;
+	dev_priv->fbc.compressed_fb = compressed_fb;
+	dev_priv->fbc.size = size;
 
 	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
 		      size);
 
 	return 0;
 
 err_fb:
-	drm_mm_put_block(compressed_fb);
+	kfree(compressed_llb);
+	drm_mm_remove_node(compressed_fb);
+err_llb:
+	kfree(compressed_fb);
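
Note on the hunk above: besides the simpler stolen-base probe, the FBC nodes move from drm_mm_search_free()/drm_mm_get_block(), where the drm_mm allocator owned the node, to caller-allocated nodes placed with drm_mm_insert_node(). The sketch below is illustrative only and not part of the diff; the helper name is invented, and it assumes the Linux 3.12-era drm_mm interface used in the new revision above.

#include <linux/slab.h>
#include <drm/drm_mm.h>

/* Hypothetical helper showing the caller-owned node pattern. */
static struct drm_mm_node *stolen_node_alloc(struct drm_mm *mm,
					     unsigned long size,
					     unsigned alignment)
{
	struct drm_mm_node *node;
	int ret;

	/* The caller now owns the node: allocate it ... */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	/* ... and let drm_mm find a free hole and insert it. */
	ret = drm_mm_insert_node(mm, node, size, alignment,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(node);	/* on failure the node was never inserted */
		return NULL;
	}
	return node;
}

As the new err_fb/err_llb paths show, the matching teardown is drm_mm_remove_node() followed by kfree().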
@@ -157,30 +167,48 @@
 
 void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->cfb_size == 0)
+	if (dev_priv->fbc.size == 0)
 		return;
 
-	if (dev_priv->compressed_fb)
-		drm_mm_put_block(dev_priv->compressed_fb);
+	if (dev_priv->fbc.compressed_fb) {
+		drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+		kfree(dev_priv->fbc.compressed_fb);
+	}
 
-	if (dev_priv->compressed_llb)
-		drm_mm_put_block(dev_priv->compressed_llb);
+	if (dev_priv->fbc.compressed_llb) {
+		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+		kfree(dev_priv->fbc.compressed_llb);
+	}
 
-	dev_priv->cfb_size = 0;
+	dev_priv->fbc.size = 0;
 }
 
 void i915_gem_cleanup_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
+		return;
+
 	i915_gem_stolen_cleanup_compression(dev);
 	drm_mm_takedown(&dev_priv->mm.stolen);
 }
 
 int i915_gem_init_stolen(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int bios_reserved = 0;
+
+	if (dev_priv->gtt.stolen_size == 0)
+		return 0;
 
 	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
+	if (dev_priv->mm.stolen_base == 0)
+		return 0;
+
+	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
+		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+
+	if (IS_VALLEYVIEW(dev))
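
The hunk above applies the same ownership change to the cleanup path (drm_mm_remove_node() plus kfree() instead of drm_mm_put_block()) and moves the FBC bookkeeping into dev_priv->fbc. An illustrative sketch of the release side, not part of the diff and using an invented helper name (same headers as the previous sketch):

/* Hypothetical counterpart to stolen_node_alloc() above. */
static void stolen_node_free(struct drm_mm_node *node)
{
	if (!node)
		return;

	drm_mm_remove_node(node);	/* unlink the range from the manager */
	kfree(node);			/* then free the caller-owned node */
}

The new i915_gem_cleanup_stolen() additionally bails out early via drm_mm_initialized(), so takedown is only attempted once the stolen allocator has actually been set up.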
@@ -257,20 +285,17 @@
 
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
 		return NULL;
 
-	if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
-		goto cleanup;
-
+	drm_gem_private_object_init(dev, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
 	obj->pages = i915_pages_create_for_stolen(dev,
 						  stolen->start, stolen->size);
 	if (obj->pages == NULL)
 		goto cleanup;
 
 	obj->has_dma_mapping = true;
-	obj->pages_pin_count = 1;
+	i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
 
-	obj->base.write_domain = I915_GEM_DOMAIN_GTT;
@@ -288,83 +313,114 @@
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	int ret;
 
-	if (dev_priv->mm.stolen_base == 0)
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return NULL;
 
 	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
 	if (size == 0)
 		return NULL;
 
-	stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
-	if (stolen)
-		stolen = drm_mm_get_block(stolen, size, 4096);
-	if (stolen == NULL)
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
 		return NULL;
 
+	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+				 4096, DRM_MM_SEARCH_DEFAULT);
+	if (ret) {
+		kfree(stolen);
+		return NULL;
+	}
+
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (obj)
 		return obj;
 
-	drm_mm_put_block(stolen);
+	drm_mm_remove_node(stolen);
+	kfree(stolen);
 	return NULL;
 }
 
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 stolen_offset,
 					       u32 gtt_offset,
 					       u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *ggtt = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
+	struct i915_vma *vma;
+	int ret;
 
-	if (dev_priv->mm.stolen_base == 0)
+	if (!drm_mm_initialized(&dev_priv->mm.stolen))
 		return NULL;
 
 	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
 			stolen_offset, gtt_offset, size);
 
 	/* KISS and expect everything to be page-aligned */
 	BUG_ON(stolen_offset & 4095);
-	BUG_ON(gtt_offset & 4095);
 	BUG_ON(size & 4095);
 
 	if (WARN_ON(size == 0))
 		return NULL;
 
-	stolen = drm_mm_create_block(&dev_priv->mm.stolen,
-				     stolen_offset, size,
-				     false);
-	if (stolen == NULL) {
+	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+	if (!stolen)
+		return NULL;
+
+	stolen->start = stolen_offset;
+	stolen->size = size;
+	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen space\n");
+		kfree(stolen);
 		return NULL;
 	}
 
 	obj = _i915_gem_object_create_stolen(dev, stolen);
 	if (obj == NULL) {
 		DRM_DEBUG_KMS("failed to allocate stolen object\n");
-		drm_mm_put_block(stolen);
+		drm_mm_remove_node(stolen);
+		kfree(stolen);
 		return NULL;
 	}
 
+	/* Some objects just need physical mem from stolen space */
+	if (gtt_offset == I915_GTT_OFFSET_NONE)
+		return obj;
+
+	vma = i915_gem_vma_create(obj, ggtt);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err_out;
+	}
+
 	/* To simplify the initialisation sequence between KMS and GTT,
 	 * we allow construction of the stolen object prior to
 	 * setting up the GTT space. The actual reservation will occur
 	 * later.
 	 */
-	if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
-		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
-						     gtt_offset, size,
-						     false);
-		if (obj->gtt_space == NULL) {
+	vma->node.start = gtt_offset;
+	vma->node.size = size;
+	if (drm_mm_initialized(&ggtt->mm)) {
+		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+		if (ret) {
 			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			drm_gem_object_unreference(&obj->base);
-			return NULL;
+			goto err_vma;
 		}
-	} else
-		obj->gtt_space = I915_GTT_RESERVED;
+	}
+
+	obj->has_global_gtt_mapping = 1;
+
+	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
+	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+
+	return obj;
 
+err_vma: