/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define AGP_USER_TYPES          (1 << 16)
#define AGP_USER_MEMORY         (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)
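
/* Note on this KolibriOS port: AllocKernelSpace()/MapPage()/FreeKernelSpace()
 * and AllocPage()/FreePage() stand in for the kmap_atomic()/kunmap_atomic()
 * and alloc_page()/__free_page() calls used by the upstream Linux driver.
 * MapPage(vaddr, page, 3) appears to map the given page writable into the
 * reserved kernel window.
 */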
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

    pt_vaddr = AllocKernelSpace(4096);

    if (pt_vaddr != NULL)
    {
        while (num_entries)
        {
            last_pte = first_pte + num_entries;
            if (last_pte > I915_PPGTT_PT_ENTRIES)
                last_pte = I915_PPGTT_PT_ENTRIES;

            MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);

            for (i = first_pte; i < last_pte; i++)
                pt_vaddr[i] = scratch_pte;

            num_entries -= last_pte - first_pte;
            first_pte = 0;
            act_pd++;
        }
        FreeKernelSpace(pt_vaddr);
    }
}
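
/* Set up the single aliasing PPGTT: reserve I915_PPGTT_PD_ENTRIES page-directory
 * entries at the end of the global GTT, allocate one page-table page per PD
 * entry and point every PTE at the scratch page via i915_ppgtt_clear_range().
 */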
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
    ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
        ppgtt->pt_pages[i] = AllocPage();
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

/*
	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
						*ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev,
						  pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;

			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}
*/
	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
//   if (ppgtt->pt_dma_addr) {
//       for (i--; i >= 0; i--)
//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
//                      4096, PCI_DMA_BIDIRECTIONAL);
//   }
err_pt_alloc:
//   kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
            FreePage(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

//   if (ppgtt->pt_dma_addr) {
//       for (i = 0; i < ppgtt->num_pd_entries; i++)
//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
//                      4096, PCI_DMA_BIDIRECTIONAL);
//   }

//   kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
        FreePage(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}
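
/* Write PTEs for each backing page of an object into the PPGTT page tables.
 * In this port the upstream scatter/gather walk is replaced by a flat
 * struct pagelist; pages->page[i] appears to hold the bus address of the
 * i-th backing page, ready for GEN6_PTE_ADDR_ENCODE().
 */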
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct pagelist *pages,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j;
	dma_addr_t page_addr;

	i = 0;

    pt_vaddr = AllocKernelSpace(4096);

    if (pt_vaddr != NULL)
    {
        while (i < pages->nents)
        {
            MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);

            for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
                page_addr = pages->page[i];
                pte = GEN6_PTE_ADDR_ENCODE(page_addr);
                pt_vaddr[j] = pte | pte_flags;
            }

            first_pte = 0;
            act_pd++;
        }
        FreeKernelSpace(pt_vaddr);
    }
}
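
/* Translate the object's cache level into GEN6 PTE flag bits and install its
 * pages into the aliasing PPGTT at the object's GTT offset.
 */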
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(obj->base.dev))
			pte_flags |= HSW_PTE_UNCACHED;
		else
			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	i915_ppgtt_insert_sg_entries(ppgtt,
				     &obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}
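
/* Some chipsets (dev_priv->mm.gtt->do_idle_maps) require the GPU to be idle
 * before GTT mappings are torn down; do_idling()/undo_idling() bracket that,
 * temporarily forcing non-interruptible waits.
 */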
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
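
/* Restoring GTT mappings after suspend/resume is compiled out in this port. */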
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}
#endif
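
/* DMA mapping of object pages is effectively a no-op here: the upstream
 * dma_map_sg() call is commented out in this port, so we always report success.
 */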
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

//   if (!dma_map_sg(&obj->base.dev->pdev->dev,
//           obj->pages->sgl, obj->pages->nents,
//           PCI_DMA_BIDIRECTIONAL))
//       return -ENOSPC;

	return 0;
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_sg_entries(&obj->pages,
				    obj->gtt_space->start >> PAGE_SHIFT,
				    agp_type);
	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

//   if (!obj->has_dma_mapping)
//       dma_unmap_sg(&dev->pdev->dev,
//                obj->pages->sgl, obj->pages->nents,
//                PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
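
/* Cache coloring for the global GTT on non-LLC platforms: keep a 4 KiB guard
 * page between neighbouring nodes of different cache domains by shrinking the
 * range handed to the allocator.
 */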
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
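
/* Initialize the drm_mm allocator covering [start, end) of the global GTT,
 * record the GTT geometry in dev_priv->mm and scrub the whole range with
 * scratch pages.
 */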
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}