Subversion Repositories Kolibri OS

Rev 3033 -> Rev 3243. In the hunks below, lines removed in Rev 3243 are prefixed with "-", lines added in Rev 3243 with "+"; unchanged context lines carry no prefix.
Line 20... Line 20...
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
+
+#define iowrite32(v, addr)      writel((v), (addr))

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define AGP_USER_TYPES          (1 << 16)
#define AGP_USER_MEMORY         (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)
+
+typedef uint32_t gtt_pte_t;
+
+/* PPGTT stuff */
+#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
+
+#define GEN6_PDE_VALID			(1 << 0)
+/* gen6+ has bit 11-4 for physical addr bit 39-32 */
+#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+#define GEN6_PTE_VALID			(1 << 0)
+#define GEN6_PTE_UNCACHED		(1 << 1)
+#define HSW_PTE_UNCACHED		(0)
+#define GEN6_PTE_CACHE_LLC		(2 << 1)
+#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
+#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
+
+static inline gtt_pte_t pte_encode(struct drm_device *dev,
+				   dma_addr_t addr,
+				   enum i915_cache_level level)
+{
+	gtt_pte_t pte = GEN6_PTE_VALID;
+	pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+	switch (level) {
+	case I915_CACHE_LLC_MLC:
+		/* Haswell doesn't set L3 this way */
+		if (IS_HASWELL(dev))
+			pte |= GEN6_PTE_CACHE_LLC;
+		else
+			pte |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(dev))
+			pte |= HSW_PTE_UNCACHED;
+		else
+			pte |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+
+	return pte;
+}
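An illustrative aside (not part of the diff): the GEN6 PTE format added above keeps address bits 31:12 in place, folds address bits 39:32 into PTE bits 11:4 via GEN6_GTT_ADDR_ENCODE(), stores the cache level in bits 2:1, and uses bit 0 as the valid flag. A minimal standalone C sketch of that encoding, using stand-in names rather than the driver's types:

/* Standalone illustration only: how a 40-bit page address and a cache
 * level are folded into a 32-bit GEN6-style PTE, mirroring
 * GEN6_GTT_ADDR_ENCODE() and pte_encode() above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PTE_VALID      (1u << 0)
#define SKETCH_PTE_CACHE_LLC  (2u << 1)

/* addr bits 31:12 stay in place; addr bits 39:32 move into PTE bits 11:4 */
static uint32_t sketch_addr_encode(uint64_t addr)
{
	return (uint32_t)(addr | ((addr >> 28) & 0xff0));
}

static uint32_t sketch_pte_encode(uint64_t addr, unsigned cache_bits)
{
	return sketch_addr_encode(addr) | cache_bits | SKETCH_PTE_VALID;
}

int main(void)
{
	uint64_t addr = 0x1234567000ULL;  /* page-aligned 40-bit address */
	uint32_t pte  = sketch_pte_encode(addr, SKETCH_PTE_CACHE_LLC);

	/* bits 31:12 = addr bits 31:12, bits 11:4 = addr bits 39:32 */
	assert((pte & 0xfffff000u) == (uint32_t)(addr & 0xfffff000u));
	assert(((pte >> 4) & 0xffu) == (uint32_t)(addr >> 32));
	printf("pte = 0x%08x\n", pte);  /* prints pte = 0x34567125 */
	return 0;
}

Compiled on its own, the sketch prints pte = 0x34567125 for the sample address, i.e. the low 20 address bits plus the folded high bits and the valid/cache flags.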

/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
-	uint32_t *pt_vaddr;
-	uint32_t scratch_pte;
+	gtt_pte_t *pt_vaddr;
+	gtt_pte_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

-	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+	scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
Line 54... Line 104...
        {
            last_pte = first_pte + num_entries;
            if (last_pte > I915_PPGTT_PT_ENTRIES)
                last_pte = I915_PPGTT_PT_ENTRIES;

-            MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3);
+            MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3);

            for (i = first_pte; i < last_pte; i++)
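Another standalone aside: both the clear and insert paths locate a PTE by splitting the linear entry index with a divide/modulo by I915_PPGTT_PT_ENTRIES. The sketch below mirrors the act_pd / first_pte arithmetic; the value of 1024 entries per table is an assumption for illustration (a 4 KiB table of 4-byte entries), not taken from the diff.

/* Standalone illustration: split a linear PPGTT entry index into a
 * page-directory slot and an offset within that page table, as the
 * act_pd / first_pte arithmetic above does. */
#include <stdio.h>

#define SKETCH_PT_ENTRIES 1024u  /* assumed entries per page table */

struct sketch_pt_pos {
	unsigned pd;   /* which page table               */
	unsigned pte;  /* which entry inside that table  */
};

static struct sketch_pt_pos sketch_locate(unsigned first_entry)
{
	struct sketch_pt_pos pos = {
		.pd  = first_entry / SKETCH_PT_ENTRIES,
		.pte = first_entry % SKETCH_PT_ENTRIES,
	};
	return pos;
}

int main(void)
{
	struct sketch_pt_pos pos = sketch_locate(3000);
	/* 3000 = 2 * 1024 + 952, so table 2, entry 952 */
	printf("pd=%u pte=%u\n", pos.pd, pos.pte);
	return 0;
}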
Line 85... Line 135...
	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
-    ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
+	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-        ppgtt->pt_pages[i] = AllocPage();
+		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
Line 126... Line 176...
	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

-	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
Line 142... Line 192...
//   }
err_pt_alloc:
//   kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
-            FreePage(ppgtt->pt_pages[i]);
+            FreePage((addr_t)(ppgtt->pt_pages[i]));
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);
Line 168... Line 218...
//                      4096, PCI_DMA_BIDIRECTIONAL);
//   }

//   kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
-        FreePage(ppgtt->pt_pages[i]);
+        FreePage((addr_t)(ppgtt->pt_pages[i]));
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
-                     const struct pagelist *pages,
+					 const struct sg_table *pages,
					 unsigned first_entry,
-					 uint32_t pte_flags)
+					 enum i915_cache_level cache_level)
{
-	uint32_t *pt_vaddr, pte;
+	gtt_pte_t *pt_vaddr;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-    unsigned i, j;
-	dma_addr_t page_addr;
+	unsigned i, j, m, segment_len;
+	dma_addr_t page_addr;
+	struct scatterlist *sg;
+
+	/* init sg walking */
+	sg = pages->sgl;
+	i = 0;
+	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+	m = 0;

-	i = 0;
-
-    pt_vaddr = AllocKernelSpace(4096);
-
-    if( pt_vaddr != NULL)
-    {
-        while (i < pages->nents)
-        {
-            MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
-
-            for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
-                page_addr = pages->page[i];
-                pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-                pt_vaddr[j] = pte | pte_flags;
-            }
+    pt_vaddr = AllocKernelSpace(4096);
+	if( pt_vaddr == NULL)
+		return;
+
+	while (i < pages->nents) {
+		MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3);
+
+        for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
+			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
+						 cache_level);
+
+			/* grab the next page */
+			if (++m == segment_len) {
+				if (++i == pages->nents)
+					break;
+
+				sg = sg_next(sg);
+				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
+				m = 0;
+			}
+		}

-            first_pte = 0;
-            act_pd++;
-        }
-        FreeKernelSpace(pt_vaddr);
-    };
-}
-
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level)
-{
-	uint32_t pte_flags = GEN6_PTE_VALID;
-
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
-		break;
-	case I915_CACHE_LLC:
-		pte_flags |= GEN6_PTE_CACHE_LLC;
-		break;
-	case I915_CACHE_NONE:
-		if (IS_HASWELL(obj->base.dev))
-			pte_flags |= HSW_PTE_UNCACHED;
-		else
-			pte_flags |= GEN6_PTE_UNCACHED;
-		break;
-	default:
-		BUG();
-	}
-
+
+        first_pte = 0;
+        act_pd++;
+        }
+        FreeKernelSpace(pt_vaddr);
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level)
+{
	i915_ppgtt_insert_sg_entries(ppgtt,
-                     &obj->pages,
+				     obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
-				     pte_flags);
+				     cache_level);
-}
-
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj)
-{
-	i915_ppgtt_clear_range(ppgtt,
-			       obj->gtt_space->start >> PAGE_SHIFT,
-			       obj->base.size >> PAGE_SHIFT);
-}
-
-/* XXX kill agp_type! */
-static unsigned int cache_level_to_agp_type(struct drm_device *dev,
-					    enum i915_cache_level cache_level)
-{
-	switch (cache_level) {
-	case I915_CACHE_LLC_MLC:
-		if (INTEL_INFO(dev)->gen >= 6)
-			return AGP_USER_CACHED_MEMORY_LLC_MLC;
-		/* Older chipsets do not have this extra level of CPU
-		 * cacheing, so fallthrough and request the PTE simply
-		 * as cached.
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj)
+{
+	i915_ppgtt_clear_range(ppgtt,
+			       obj->gtt_space->start >> PAGE_SHIFT,
+			       obj->base.size >> PAGE_SHIFT);
+}
+
+void i915_gem_init_ppgtt(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t pd_offset;
+	struct intel_ring_buffer *ring;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	uint32_t __iomem *pd_addr;
+	uint32_t pd_entry;
+	int i;
+
+	if (!dev_priv->mm.aliasing_ppgtt)
+		return;
+
+
+	pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
+
+		if (dev_priv->mm.gtt->needs_dmar)
+			pt_addr = ppgtt->pt_dma_addr[i];
+		else
+			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+
+		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
+		pd_entry |= GEN6_PDE_VALID;
+
+		writel(pd_entry, pd_addr + i);
+	}
+	readl(pd_addr);
+
+	pd_offset = ppgtt->pd_offset;
+	pd_offset /= 64; /* in cachelines, */
+	pd_offset <<= 16;
+
+	if (INTEL_INFO(dev)->gen == 6) {
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
+		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
+				       ECOCHK_PPGTT_CACHE64B);
+		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+	} else if (INTEL_INFO(dev)->gen >= 7) {
+		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
+		/* GFX_MODE is per-ring on gen7+ */
+	}
+
+	for_each_ring(ring, dev_priv, i) {
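Aside on the rewritten scatter-gather walk in i915_ppgtt_insert_sg_entries() above: segment_len counts the pages of the current segment, m is the page offset inside it, and sg_next() advances to the next segment once m wraps. The standalone sketch below reproduces that walk over a plain array stand-in for a scatterlist (sketch_seg and the 4 KiB page size here are illustrative, not the kernel's definitions).

/* Standalone illustration of the segment walk used above: visit every
 * 4 KiB page of a scatter list whose segments may span several pages. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12

struct sketch_seg {
	uint64_t dma_address;  /* start of the segment          */
	uint32_t dma_len;      /* length in bytes, page aligned */
};

static void sketch_walk(const struct sketch_seg *segs, unsigned nents)
{
	unsigned i = 0, m = 0;
	unsigned segment_len = segs[0].dma_len >> SKETCH_PAGE_SHIFT;

	for (;;) {
		uint64_t page_addr = segs[i].dma_address +
				     ((uint64_t)m << SKETCH_PAGE_SHIFT);
		printf("page at 0x%llx\n", (unsigned long long)page_addr);

		if (++m == segment_len) {   /* segment exhausted */
			if (++i == nents)
				break;      /* whole list done   */
			segment_len = segs[i].dma_len >> SKETCH_PAGE_SHIFT;
			m = 0;
		}
	}
}

int main(void)
{
	const struct sketch_seg segs[] = {
		{ 0x100000, 2 << SKETCH_PAGE_SHIFT },  /* two pages */
		{ 0x800000, 1 << SKETCH_PAGE_SHIFT },  /* one page  */
	};
	sketch_walk(segs, 2);
	return 0;
}

Run standalone, the sketch prints three page addresses: 0x100000, 0x101000 and 0x800000.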
Line 286... Line 373...
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
+
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+				 unsigned first_entry,
+				 unsigned num_entries)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	gtt_pte_t scratch_pte;
+	gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	int i;
+
+	if (INTEL_INFO(dev)->gen < 6) {
+		intel_gtt_clear_range(first_entry, num_entries);
+		return;
+	}
+
+	if (WARN(num_entries > max_entries,
+		 "First entry = %d; Num entries = %d (max=%d)\n",
+		 first_entry, num_entries, max_entries))
+		num_entries = max_entries;
+
+	scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
+	for (i = 0; i < num_entries; i++)
+		iowrite32(scratch_pte, &gtt_base[i]);
+	readl(gtt_base);
+}
+

#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
-	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+	i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

-	intel_gtt_chipset_flush();
-}
-#endif
-
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
-{
-	return 0;
+	i915_gem_chipset_flush(dev);
+}
+#endif
+
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+    struct scatterlist *sg, *s;
+    unsigned int nents ;
+    int i;
+
+	if (obj->has_dma_mapping)
+		return 0;
+
+    sg    = obj->pages->sgl;
+    nents = obj->pages->nents;
+
+
+    WARN_ON(nents == 0 || sg[0].length == 0);
+
+    for_each_sg(sg, s, nents, i) {
+             BUG_ON(!sg_page(s));
+             s->dma_address = sg_phys(s);
+    }
+
+    asm volatile("lock; addl $0,0(%%esp)": : :"memory");
+
+	return 0;
+}
+
+/*
+ * Binds an object into the global gtt with the specified cache level. The object
+ * will be accessible to the GPU via commands whose operands reference offsets
+ * within the global GTT as well as accessible by the GPU through the GMADR
+ * mapped BAR (dev_priv->mm.gtt->gtt).
+ */
+static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
+				  enum i915_cache_level level)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct sg_table *st = obj->pages;
+	struct scatterlist *sg = st->sgl;
+	const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
+	const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+	gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
+	int unused, i = 0;
+	unsigned int len, m = 0;
+	dma_addr_t addr;
+
+	for_each_sg(st->sgl, sg, st->nents, unused) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (m = 0; m < len; m++) {
+			addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+			iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
+			i++;
+		}
+	}
+
+	BUG_ON(i > max_entries);
+	BUG_ON(i != obj->base.size / PAGE_SIZE);
+
+	/* XXX: This serves as a posting read to make sure that the PTE has
+	 * actually been updated. There is some concern that even though
+	 * registers and PTEs are within the same BAR that they are potentially
+	 * of NUMA access patterns. Therefore, even with the way we assume
+	 * hardware should work, we must keep this posting read for paranoia.
+	 */
+	if (i != 0)
+		WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
+
+	/* This next bit makes the above posting read even more important. We
+	 * want to flush the TLBs only after we're certain all the PTE updates
+	 * have finished.
+	 */
+	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
-}
-
+
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
				enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
-
-    intel_gtt_insert_sg_entries(&obj->pages,
-				    obj->gtt_space->start >> PAGE_SHIFT,
-				       agp_type);
+	if (INTEL_INFO(dev)->gen < 6) {
+		unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+			AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+		intel_gtt_insert_sg_entries(obj->pages,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    flags);
+	} else {
+		gen6_ggtt_bind_object(obj, cache_level);
+	}
+
	obj->has_global_gtt_mapping = 1;
}

Line 382... Line 572...
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
-	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+	i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
+
+static int setup_scratch_page(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct page *page;
+	dma_addr_t dma_addr;
+
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+	if (page == NULL)
+		return -ENOMEM;
+
+#ifdef CONFIG_INTEL_IOMMU
+	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
+				PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, dma_addr))
+		return -EINVAL;
+#else
+	dma_addr = page_to_phys(page);
+#endif
+	dev_priv->mm.gtt->scratch_page = page;
+	dev_priv->mm.gtt->scratch_page_dma = dma_addr;
+
+	return 0;
+}
+
+
+static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
+	return snb_gmch_ctl << 20;
+}
+
+static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
+{
+	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
+	return snb_gmch_ctl << 25; /* 32 MB units */
+}
+
+static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
+{
+	static const int stolen_decoder[] = {
+		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
+	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
+	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
+	return stolen_decoder[snb_gmch_ctl] << 20;
+}
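Aside on the size helpers added above: each one decodes a bit field from the GMCH control word and scales it to bytes, either linearly (gen6) or through a lookup table (gen7). The standalone sketch below shows the same two patterns; the shift/mask values are assumptions made only for the sketch, and only the gen7 size table is copied from the code above.

/* Standalone illustration of the decode pattern above: pull a bit field
 * out of a control word and scale it to a size in bytes. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GGMS_SHIFT 8    /* assumed position of the GTT-size field */
#define SKETCH_GGMS_MASK  0x3  /* assumed width of the GTT-size field    */

/* Same shape as gen6_get_total_gtt_size(): field value N means N MiB. */
static unsigned int sketch_total_gtt_size(uint16_t gmch_ctl)
{
	gmch_ctl >>= SKETCH_GGMS_SHIFT;
	gmch_ctl &= SKETCH_GGMS_MASK;
	return (unsigned int)gmch_ctl << 20;
}

/* Same shape as gen7_get_stolen_size(): the field indexes a table of
 * sizes in MiB rather than scaling linearly. */
static unsigned int sketch_stolen_size(uint16_t field)
{
	static const unsigned int stolen_mb[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352 };
	return stolen_mb[field] << 20;
}

int main(void)
{
	printf("gtt    = %u bytes\n", sketch_total_gtt_size(0x0200)); /* 2 MiB  */
	printf("stolen = %u bytes\n", sketch_stolen_size(7));         /* 64 MiB */
	return 0;
}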
+
+int i915_gem_gtt_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	phys_addr_t gtt_bus_addr;
+	u16 snb_gmch_ctl;
+	int ret;
+
+	/* On modern platforms we need not worry ourself with the legacy
+	 * hostbridge query stuff. Skip it entirely
+	 */
+	if (INTEL_INFO(dev)->gen < 6) {
+		ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
+		if (!ret) {
+			DRM_ERROR("failed to set up gmch\n");
+			return -EIO;
+		}
+
+		dev_priv->mm.gtt = intel_gtt_get();
+		if (!dev_priv->mm.gtt) {
+			DRM_ERROR("Failed to initialize GTT\n");
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
+
+#ifdef CONFIG_INTEL_IOMMU
+	dev_priv->mm.gtt->needs_dmar = 1;
+#endif
+
+	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
+	gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
+	dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
+
+	/* i9xx_setup */
+	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+	dev_priv->mm.gtt->gtt_total_entries =
+		gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
+	if (INTEL_INFO(dev)->gen < 7)
+		dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+	else
+		dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
+
+	dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
+	/* 64/512MB is the current min/max we actually know of, but this is just a
+	 * coarse sanity check.
+	 */
+	if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
+	    dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
+		DRM_ERROR("Unknown GMADR entries (%d)\n",
+			  dev_priv->mm.gtt->gtt_mappable_entries);
+		ret = -ENXIO;
+		goto err_out;
+	}
+
+	ret = setup_scratch_page(dev);
+	if (ret) {
+		DRM_ERROR("Scratch setup failed\n");
+		goto err_out;
+	}
+
+	dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
+					   dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
+	if (!dev_priv->mm.gtt->gtt) {
+		DRM_ERROR("Failed to map the gtt page table\n");
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
+	DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
+	DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
+	DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
+
+	return 0;
+
+err_out:
+	kfree(dev_priv->mm.gtt);
+	return ret;
+}
+
+
+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+    if (sg_is_last(sg))
+        return NULL;
+
+    sg++;
+    if (unlikely(sg_is_chain(sg)))
+            sg = sg_chain_ptr(sg);
+
+    return sg;
+}
+
+
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+                     sg_free_fn *free_fn)
+{
+    struct scatterlist *sgl, *next;
+
+    if (unlikely(!table->sgl))
+            return;
+
+    sgl = table->sgl;
+    while (table->orig_nents) {
+        unsigned int alloc_size = table->orig_nents;
+        unsigned int sg_size;
+
+        /*
+         * If we have more than max_ents segments left,
+         * then assign 'next' to the sg table after the current one.
+         * sg_size is then one less than alloc size, since the last
+         * element is the chain pointer.
+         */
+        if (alloc_size > max_ents) {
+                next = sg_chain_ptr(&sgl[max_ents - 1]);
+                alloc_size = max_ents;
+                sg_size = alloc_size - 1;
+        } else {
+                sg_size = alloc_size;
+                next = NULL;
+        }
+
+        table->orig_nents -= sg_size;
+        kfree(sgl);
+        sgl = next;
+    }
+
+    table->sgl = NULL;
+}
+
+void sg_free_table(struct sg_table *table)
+{
+    __sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+}
+
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+    struct scatterlist *sg, *prv;
+    unsigned int left;
+    unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
+
+#ifndef ARCH_HAS_SG_CHAIN
+    BUG_ON(nents > max_ents);
+#endif
+
+    memset(table, 0, sizeof(*table));
+
+    left = nents;
+    prv = NULL;
+    do {
+        unsigned int sg_size, alloc_size = left;
+
+        if (alloc_size > max_ents) {
+                alloc_size = max_ents;
+                sg_size = alloc_size - 1;
+        } else
+                sg_size = alloc_size;
+
+        left -= sg_size;
+
+        sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
+        if (unlikely(!sg)) {
+                /*
+                 * Adjust entry count to reflect that the last
+                 * entry of the previous table won't be used for
+                 * linkage.  Without this, sg_kfree() may get
+                 * confused.
+                 */
+                if (prv)
+                        table->nents = ++table->orig_nents;
+
+                goto err;
+        }
+
+        sg_init_table(sg, alloc_size);
+        table->nents = table->orig_nents += sg_size;
+
+        /*
+         * If this is the first mapping, assign the sg table header.
+         * If this is not the first mapping, chain previous part.
+         */
+        if (prv)
+                sg_chain(prv, max_ents, sg);
+        else
+                table->sgl = sg;
+
+        /*
+         * If no more entries after this one, mark the end
+         */
+        if (!left)
+                sg_mark_end(&sg[sg_size - 1]);
+
+        prv = sg;
+    } while (left);
+
+    return 0;
+
+err:
+    __sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+
+    return -ENOMEM;
+}
+
+
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+    memset(sgl, 0, sizeof(*sgl) * nents);
+#ifdef CONFIG_DEBUG_SG
+    {
+            unsigned int i;
+            for (i = 0; i < nents; i++)
+                    sgl[i].sg_magic = SG_MAGIC;
+    }
+#endif
+    sg_mark_end(&sgl[nents - 1]);
+}
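A final standalone aside on sg_alloc_table() above: the table is built from chunks of at most SG_MAX_SINGLE_ALLOC slots, and whenever entries remain after a chunk, the last slot of that chunk is turned into a chain pointer to the next one, so each full chunk carries one entry fewer than it allocates. The sketch below reproduces only that sizing arithmetic, with an arbitrary chunk capacity of 8 standing in for SG_MAX_SINGLE_ALLOC:

/* Standalone illustration of the chunking arithmetic in sg_alloc_table():
 * how many slots each allocation gets and how many of them carry real
 * entries once a chain pointer is reserved. */
#include <stdio.h>

#define SKETCH_MAX_ENTS 8u  /* stand-in for SG_MAX_SINGLE_ALLOC */

static void sketch_plan_table(unsigned int nents)
{
	unsigned int left = nents, chunk = 0;

	do {
		unsigned int alloc_size = left, sg_size;

		if (alloc_size > SKETCH_MAX_ENTS) {
			/* last slot of this chunk becomes the chain pointer */
			alloc_size = SKETCH_MAX_ENTS;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
		}

		left -= sg_size;
		printf("chunk %u: %u slots allocated, %u entries, %s\n",
		       chunk++, alloc_size, sg_size,
		       left ? "chained" : "end of table");
	} while (left);
}

int main(void)
{
	sketch_plan_table(18);  /* 7 + 7 + 4 entries across three chunks */
	return 0;
}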