Subversion Repositories: KolibriOS


Diff between Rev 2360 and Rev 3031
Line 20... Line 20...

  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
  */

-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"

 #define AGP_USER_TYPES          (1 << 16)
 #define AGP_USER_MEMORY         (AGP_USER_TYPES)
 #define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)

+/* PPGTT support for Sandybridge/Gen6 and later */
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+				   unsigned first_entry,
+				   unsigned num_entries)
+{
+	uint32_t *pt_vaddr;
+	uint32_t scratch_pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned last_pte, i;
+
+	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+	pt_vaddr = AllocKernelSpace(4096);
+	if (pt_vaddr != NULL) {
+		while (num_entries) {
+			last_pte = first_pte + num_entries;
+			if (last_pte > I915_PPGTT_PT_ENTRIES)
+				last_pte = I915_PPGTT_PT_ENTRIES;
+
+			/* window the current page table into kernel space */
+			MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
+
+			for (i = first_pte; i < last_pte; i++)
+				pt_vaddr[i] = scratch_pte;
+
+			num_entries -= last_pte - first_pte;
+			first_pte = 0;
+			act_pd++;
+		}
+		FreeKernelSpace(pt_vaddr);
+	}
+}
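
Both i915_ppgtt_clear_range() above and i915_ppgtt_insert_sg_entries() further down decompose a linear GTT entry index into a page-directory slot (act_pd) and a PTE slot within that page table (first_pte). A minimal standalone sketch of that arithmetic, assuming the Gen6 value I915_PPGTT_PT_ENTRIES == 1024 (one 4096-byte table of 4-byte PTEs; the constant is defined in the driver headers, not in this diff):

	#include <stdio.h>

	#define I915_PPGTT_PT_ENTRIES 1024   /* assumed Gen6 value: 4096-byte table / 4-byte PTE */

	int main(void)
	{
		unsigned first_entry = 1536;                              /* example linear GTT entry */
		unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;    /* which page table */
		unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; /* slot inside it */

		/* 1536 = 1*1024 + 512, so this prints pd=1 pte=512 */
		printf("pd=%u pte=%u\n", act_pd, first_pte);
		return 0;
	}
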
+
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt;
+	unsigned first_pd_entry_in_global_pt;
+	int i;
+	int ret = -ENOMEM;
+
+	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+	 * entries. For aliasing ppgtt support we just steal them at the end for
+	 * now. */
+	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ret;
+
+	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+	ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
+				  GFP_KERNEL);
+	if (!ppgtt->pt_pages)
+		goto err_ppgtt;
+
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		ppgtt->pt_pages[i] = AllocPage();
+		if (!ppgtt->pt_pages[i])
+			goto err_pt_alloc;
+	}
+
+/*
+	if (dev_priv->mm.gtt->needs_dmar) {
+		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
+					     *ppgtt->num_pd_entries,
+					     GFP_KERNEL);
+		if (!ppgtt->pt_dma_addr)
+			goto err_pt_alloc;
+
+		for (i = 0; i < ppgtt->num_pd_entries; i++) {
+			dma_addr_t pt_addr;
+
+			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+					       0, 4096,
+					       PCI_DMA_BIDIRECTIONAL);
+
+			if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+				ret = -EIO;
+				goto err_pd_pin;
+			}
+			ppgtt->pt_dma_addr[i] = pt_addr;
+		}
+	}
+*/
+	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+	i915_ppgtt_clear_range(ppgtt, 0,
+			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+
+	dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+	return 0;
+
+err_pd_pin:
+//   if (ppgtt->pt_dma_addr) {
+//       for (i--; i >= 0; i--)
+//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+//                      4096, PCI_DMA_BIDIRECTIONAL);
+//   }
+err_pt_alloc:
+//   kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		if (ppgtt->pt_pages[i])
+			FreePage(ppgtt->pt_pages[i]);
+	}
+	kfree(ppgtt->pt_pages);
+err_ppgtt:
+	kfree(ppgtt);
+
+	return ret;
+}
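
Note how the directory offset is derived above: the PDEs live at the tail of the global GTT, so pd_offset is just the first stolen slot times the 4-byte entry size. A standalone sketch of that arithmetic, using the 512*1024-entry figure from the comment in the code and assuming I915_PPGTT_PD_ENTRIES == 512 (the constant itself is not shown in this diff):

	#include <stdio.h>
	#include <stdint.h>

	#define I915_PPGTT_PD_ENTRIES 512   /* assumed Gen6 value */

	int main(void)
	{
		unsigned gtt_total_entries = 512 * 1024;  /* from the comment in the code above */
		unsigned first_pd_entry_in_global_pt =
			gtt_total_entries - I915_PPGTT_PD_ENTRIES;
		uint32_t pd_offset = first_pd_entry_in_global_pt * sizeof(uint32_t);

		/* 523776 slots in, i.e. 2095104 bytes into the global page table */
		printf("first PDE slot %u, pd_offset %u bytes\n",
		       first_pd_entry_in_global_pt, (unsigned)pd_offset);
		return 0;
	}
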
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i;
+
+	if (!ppgtt)
+		return;
+
+//   if (ppgtt->pt_dma_addr) {
+//       for (i = 0; i < ppgtt->num_pd_entries; i++)
+//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+//                      4096, PCI_DMA_BIDIRECTIONAL);
+//   }
+
+//   kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+		FreePage(ppgtt->pt_pages[i]);
+	kfree(ppgtt->pt_pages);
+	kfree(ppgtt);
+}
+
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+					 const struct pagelist *pages,
+					 unsigned first_entry,
+					 uint32_t pte_flags)
+{
+	uint32_t *pt_vaddr, pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned i, j;
+	dma_addr_t page_addr;
+
+	i = 0;
+
+	pt_vaddr = AllocKernelSpace(4096);
+	if (pt_vaddr != NULL) {
+		while (i < pages->nents) {
+			MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
+
+			/* guard on i: nents need not fill the last page table */
+			for (j = first_pte;
+			     j < I915_PPGTT_PT_ENTRIES && i < pages->nents;
+			     j++, i++) {
+				page_addr = pages->page[i];
+				pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+				pt_vaddr[j] = pte | pte_flags;
+			}
+
+			first_pte = 0;
+			act_pd++;
+		}
+		FreeKernelSpace(pt_vaddr);
+	}
+}
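
The struct pagelist consumed here is KolibriOS's flat replacement for the Linux sg_table; its definition is not part of this diff. From the usage (pages->nents as a count, pages->page[i] yielding a dma_addr_t) it is presumably shaped like the following hypothetical sketch:

	/* Hypothetical shape inferred from usage in this file; not the actual definition. */
	struct pagelist {
		dma_addr_t *page;   /* physical address of each backing 4 KiB page */
		unsigned    nents;  /* number of pages in the list */
	};
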
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level)
+{
+	uint32_t pte_flags = GEN6_PTE_VALID;
+
+	switch (cache_level) {
+	case I915_CACHE_LLC_MLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(obj->base.dev))
+			pte_flags |= HSW_PTE_UNCACHED;
+		else
+			pte_flags |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+	i915_ppgtt_insert_sg_entries(ppgtt,
+				     &obj->pages,
+				     obj->gtt_space->start >> PAGE_SHIFT,
+				     pte_flags);
+}
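
The cache_level switch above is the policy point that turns a driver cache level into Gen6 PTE bits. A standalone sketch of the same mapping; the GEN6_PTE_*/HSW_PTE_* values are reproduced from the i915_drv.h of this era on a best-effort basis and should be treated as illustrative, and is_haswell stands in for IS_HASWELL(dev):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdbool.h>

	/* PTE bits as believed defined in i915_drv.h for Gen6 (assumption). */
	#define GEN6_PTE_VALID          (1 << 0)
	#define GEN6_PTE_UNCACHED       (1 << 1)
	#define HSW_PTE_UNCACHED        (0)
	#define GEN6_PTE_CACHE_LLC      (2 << 1)
	#define GEN6_PTE_CACHE_LLC_MLC  (3 << 1)

	enum i915_cache_level { I915_CACHE_NONE, I915_CACHE_LLC, I915_CACHE_LLC_MLC };

	static uint32_t cache_level_to_pte_flags(enum i915_cache_level level, bool is_haswell)
	{
		uint32_t pte_flags = GEN6_PTE_VALID;

		switch (level) {
		case I915_CACHE_LLC_MLC:
			pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
			break;
		case I915_CACHE_LLC:
			pte_flags |= GEN6_PTE_CACHE_LLC;
			break;
		case I915_CACHE_NONE:
			pte_flags |= is_haswell ? HSW_PTE_UNCACHED : GEN6_PTE_UNCACHED;
			break;
		}
		return pte_flags;
	}

	int main(void)
	{
		printf("LLC: 0x%x, uncached: 0x%x\n",
		       cache_level_to_pte_flags(I915_CACHE_LLC, false),
		       cache_level_to_pte_flags(I915_CACHE_NONE, false));
		return 0;
	}
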
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj)
+{
+	i915_ppgtt_clear_range(ppgtt,
+			       obj->gtt_space->start >> PAGE_SHIFT,
+			       obj->base.size >> PAGE_SHIFT);
+}

 /* XXX kill agp_type! */
 static unsigned int cache_level_to_agp_type(struct drm_device *dev,
Line 83... Line 296...

 
 	/* First fill our portion of the GTT with scratch pages */
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
-		i915_gem_gtt_rebind_object(obj, obj->cache_level);
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
 }
 #endif
 
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
-	int ret;
-
-//   if (dev_priv->mm.gtt->needs_dmar) {
-//       ret = intel_gtt_map_memory(obj->pages,
-//                      obj->base.size >> PAGE_SHIFT,
-//                      &obj->sg_list,
-//                      &obj->num_sg);
-//       if (ret != 0)
-//           return ret;
-
-//       intel_gtt_insert_sg_entries(obj->sg_list,
-//                       obj->num_sg,
-//                       obj->gtt_space->start >> PAGE_SHIFT,
-//                       agp_type);
-//   } else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
-
-	return 0;
-}
-
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
-				enum i915_cache_level cache_level)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
-
-//   if (dev_priv->mm.gtt->needs_dmar) {
-//       BUG_ON(!obj->sg_list);
-
-//       intel_gtt_insert_sg_entries(obj->sg_list,
-//                       obj->num_sg,
-//                       obj->gtt_space->start >> PAGE_SHIFT,
-//                       agp_type);
-//   } else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	bool interruptible;
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+{
+	if (obj->has_dma_mapping)
+		return 0;
+
+//   if (!dma_map_sg(&obj->base.dev->pdev->dev,
+//           obj->pages->sgl, obj->pages->nents,
+//           PCI_DMA_BIDIRECTIONAL))
+//       return -ENOSPC;
+
+	return 0;
+}
+
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+				enum i915_cache_level cache_level)
+{
+	struct drm_device *dev = obj->base.dev;
+	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+	intel_gtt_insert_sg_entries(&obj->pages,
+				    obj->gtt_space->start >> PAGE_SHIFT,
+				    agp_type);
+	obj->has_global_gtt_mapping = 1;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+			      obj->base.size >> PAGE_SHIFT);
+
+	obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible;
+
+	interruptible = do_idling(dev_priv);
+
+//   if (!obj->has_dma_mapping)
+//       dma_unmap_sg(&dev->pdev->dev,
+//                obj->pages->sgl, obj->pages->nents,
+//                PCI_DMA_BIDIRECTIONAL);
+
+	undo_idling(dev_priv, interruptible);
+}
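
With this revision the old bind/rebind pair becomes a four-hook lifecycle: prepare (DMA-map, stubbed out in this port), bind, unbind, and finish (DMA-unmap, also stubbed). A hypothetical caller sketch showing the intended order; the two wrapper functions are made up for illustration and are not part of the driver:

	/* Hypothetical wrappers; error handling elided. */
	static int bind_into_global_gtt(struct drm_i915_gem_object *obj,
					enum i915_cache_level level)
	{
		int ret = i915_gem_gtt_prepare_object(obj); /* DMA-map backing pages (no-op here) */
		if (ret)
			return ret;
		i915_gem_gtt_bind_object(obj, level);       /* write the global GTT entries */
		return 0;
	}

	static void unbind_from_global_gtt(struct drm_i915_gem_object *obj)
	{
		i915_gem_gtt_unbind_object(obj);            /* point entries back at scratch */
		i915_gem_gtt_finish_object(obj);            /* DMA-unmap (no-op here) */
	}
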
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+			      unsigned long start,
+			      unsigned long mappable_end,
+			      unsigned long end)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Subtract the guard page ... */