Subversion Repositories Kolibri OS

Rev

Rev 3031 | Rev 3243 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
2332 Serge 1
/*
2
 * Copyright © 2010 Daniel Vetter
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 */
24
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
30
 
31
#define AGP_USER_TYPES          (1 << 16)
32
#define AGP_USER_MEMORY         (AGP_USER_TYPES)
33
#define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)
34
 
3031 serge 35
/* PPGTT support for Sandybridge/Gen6 and later */
36
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
37
				   unsigned first_entry,
38
				   unsigned num_entries)
39
{
40
	uint32_t *pt_vaddr;
41
	uint32_t scratch_pte;
42
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
43
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
44
	unsigned last_pte, i;
45
 
46
	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
47
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
48
 
49
    pt_vaddr = AllocKernelSpace(4096);
50
 
51
    if(pt_vaddr != NULL)
52
    {
53
        while (num_entries)
54
        {
55
            last_pte = first_pte + num_entries;
56
            if (last_pte > I915_PPGTT_PT_ENTRIES)
57
                last_pte = I915_PPGTT_PT_ENTRIES;
58
 
59
            MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3);
60
 
61
            for (i = first_pte; i < last_pte; i++)
62
                pt_vaddr[i] = scratch_pte;
63
 
64
            num_entries -= last_pte - first_pte;
65
            first_pte = 0;
66
            act_pd++;
67
        }
68
        FreeKernelSpace(pt_vaddr);
69
    };
70
}
71
 
72
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
73
{
74
	struct drm_i915_private *dev_priv = dev->dev_private;
75
	struct i915_hw_ppgtt *ppgtt;
76
	unsigned first_pd_entry_in_global_pt;
77
	int i;
78
	int ret = -ENOMEM;
79
 
80
	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
81
	 * entries. For aliasing ppgtt support we just steal them at the end for
82
	 * now. */
83
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
84
 
85
	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
86
	if (!ppgtt)
87
		return ret;
88
 
89
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
90
    ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
91
				  GFP_KERNEL);
92
	if (!ppgtt->pt_pages)
93
		goto err_ppgtt;
94
 
95
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
96
        ppgtt->pt_pages[i] = AllocPage();
97
		if (!ppgtt->pt_pages[i])
98
			goto err_pt_alloc;
99
	}
100
 
101
/*
102
	if (dev_priv->mm.gtt->needs_dmar) {
103
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
104
						*ppgtt->num_pd_entries,
105
					     GFP_KERNEL);
106
		if (!ppgtt->pt_dma_addr)
107
			goto err_pt_alloc;
108
 
109
		for (i = 0; i < ppgtt->num_pd_entries; i++) {
110
			dma_addr_t pt_addr;
111
 
112
			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
113
					       0, 4096,
114
					       PCI_DMA_BIDIRECTIONAL);
115
 
116
			if (pci_dma_mapping_error(dev->pdev,
117
						  pt_addr)) {
118
				ret = -EIO;
119
				goto err_pd_pin;
120
 
121
			}
122
			ppgtt->pt_dma_addr[i] = pt_addr;
123
		}
124
	}
125
*/
126
	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
127
 
128
	i915_ppgtt_clear_range(ppgtt, 0,
129
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
130
 
131
	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
132
 
133
	dev_priv->mm.aliasing_ppgtt = ppgtt;
134
 
135
	return 0;
136
 
137
err_pd_pin:
138
//   if (ppgtt->pt_dma_addr) {
139
//       for (i--; i >= 0; i--)
140
//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
141
//                      4096, PCI_DMA_BIDIRECTIONAL);
142
//   }
143
err_pt_alloc:
144
//   kfree(ppgtt->pt_dma_addr);
145
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
146
		if (ppgtt->pt_pages[i])
147
            FreePage(ppgtt->pt_pages[i]);
148
	}
149
	kfree(ppgtt->pt_pages);
150
err_ppgtt:
151
	kfree(ppgtt);
152
 
153
	return ret;
154
}
155
 
156
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
157
{
158
	struct drm_i915_private *dev_priv = dev->dev_private;
159
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
160
	int i;
161
 
162
	if (!ppgtt)
163
		return;
164
 
165
//   if (ppgtt->pt_dma_addr) {
166
//       for (i = 0; i < ppgtt->num_pd_entries; i++)
167
//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
168
//                      4096, PCI_DMA_BIDIRECTIONAL);
169
//   }
170
 
171
//   kfree(ppgtt->pt_dma_addr);
172
	for (i = 0; i < ppgtt->num_pd_entries; i++)
173
        FreePage(ppgtt->pt_pages[i]);
174
	kfree(ppgtt->pt_pages);
175
	kfree(ppgtt);
176
}
177
 
178
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
179
                     const struct pagelist *pages,
180
					 unsigned first_entry,
181
					 uint32_t pte_flags)
182
{
183
	uint32_t *pt_vaddr, pte;
184
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
185
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
186
    unsigned i, j;
187
	dma_addr_t page_addr;
188
 
189
	i = 0;
190
 
191
    pt_vaddr = AllocKernelSpace(4096);
192
 
193
    if( pt_vaddr != NULL)
194
    {
195
        while (i < pages->nents)
196
        {
197
            MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
198
 
199
            for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
200
                page_addr = pages->page[i];
201
                pte = GEN6_PTE_ADDR_ENCODE(page_addr);
202
                pt_vaddr[j] = pte | pte_flags;
203
            }
204
 
205
            first_pte = 0;
206
            act_pd++;
207
        }
208
        FreeKernelSpace(pt_vaddr);
209
    };
210
}
211
 
212
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
213
			    struct drm_i915_gem_object *obj,
214
			    enum i915_cache_level cache_level)
215
{
216
	uint32_t pte_flags = GEN6_PTE_VALID;
217
 
218
	switch (cache_level) {
219
	case I915_CACHE_LLC_MLC:
220
		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
221
		break;
222
	case I915_CACHE_LLC:
223
		pte_flags |= GEN6_PTE_CACHE_LLC;
224
		break;
225
	case I915_CACHE_NONE:
226
		if (IS_HASWELL(obj->base.dev))
227
			pte_flags |= HSW_PTE_UNCACHED;
228
		else
229
			pte_flags |= GEN6_PTE_UNCACHED;
230
		break;
231
	default:
232
		BUG();
233
	}
234
 
235
	i915_ppgtt_insert_sg_entries(ppgtt,
236
                     &obj->pages,
237
				     obj->gtt_space->start >> PAGE_SHIFT,
238
				     pte_flags);
239
}
240
 
241
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
242
			      struct drm_i915_gem_object *obj)
243
{
244
	i915_ppgtt_clear_range(ppgtt,
245
			       obj->gtt_space->start >> PAGE_SHIFT,
246
			       obj->base.size >> PAGE_SHIFT);
247
}
248
 
2332 Serge 249
/* XXX kill agp_type! */
250
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
251
					    enum i915_cache_level cache_level)
252
{
253
	switch (cache_level) {
254
	case I915_CACHE_LLC_MLC:
255
		if (INTEL_INFO(dev)->gen >= 6)
256
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
257
		/* Older chipsets do not have this extra level of CPU
258
		 * cacheing, so fallthrough and request the PTE simply
259
		 * as cached.
260
		 */
261
	case I915_CACHE_LLC:
262
		return AGP_USER_CACHED_MEMORY;
263
	default:
264
	case I915_CACHE_NONE:
265
		return AGP_USER_MEMORY;