Subversion Repositories Kolibri OS

--- Rev 2360
+++ Rev 3031
 /*
  * Copyright © 2010 Daniel Vetter
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice (including the next
  * paragraph) shall be included in all copies or substantial portions of the
  * Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  * IN THE SOFTWARE.
  *
  */
 
-#include "drmP.h"
-#include "drm.h"
-#include "i915_drm.h"
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 
 #define AGP_USER_TYPES          (1 << 16)
 #define AGP_USER_MEMORY         (AGP_USER_TYPES)
 #define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)
+
+/* PPGTT support for Sandybdrige/Gen6 and later */
+static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+				   unsigned first_entry,
+				   unsigned num_entries)
+{
+	uint32_t *pt_vaddr;
+	uint32_t scratch_pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned last_pte, i;
+
+	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
+	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
+
+    pt_vaddr = AllocKernelSpace(4096);
+
+    if(pt_vaddr != NULL)
+    {
+        while (num_entries)
+        {
+            last_pte = first_pte + num_entries;
+            if (last_pte > I915_PPGTT_PT_ENTRIES)
+                last_pte = I915_PPGTT_PT_ENTRIES;
+
+            MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3);
+
+            for (i = first_pte; i < last_pte; i++)
+                pt_vaddr[i] = scratch_pte;
+
+            num_entries -= last_pte - first_pte;
+            first_pte = 0;
+            act_pd++;
+        }
+        FreeKernelSpace(pt_vaddr);
+    };
+}
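The new clear routine walks the PPGTT one page table at a time: a linear entry index is split into a page-directory slot (act_pd) and an offset inside that table (first_pte), and each table page is temporarily mapped with MapPage() before its PTEs are overwritten with the scratch PTE. A minimal standalone sketch of that index arithmetic, assuming the usual Gen6 value of 1024 PTEs per page table (I915_PPGTT_PT_ENTRIES is defined in the driver headers, not in this file):

#include <stdio.h>

/* Assumed value; the real constant comes from the driver headers. */
#define I915_PPGTT_PT_ENTRIES 1024

/* Split a linear PPGTT entry index into (page-directory slot, PTE slot),
 * the same way i915_ppgtt_clear_range() does before mapping each table. */
static void split_entry(unsigned entry, unsigned *pd, unsigned *pte)
{
	*pd  = entry / I915_PPGTT_PT_ENTRIES;
	*pte = entry % I915_PPGTT_PT_ENTRIES;
}

int main(void)
{
	unsigned pd, pte;

	split_entry(0, &pd, &pte);
	printf("entry 0    -> pd %u, pte %u\n", pd, pte);   /* pd 0, pte 0   */

	split_entry(1500, &pd, &pte);
	printf("entry 1500 -> pd %u, pte %u\n", pd, pte);   /* pd 1, pte 476 */

	return 0;
}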
+
+int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt;
+	unsigned first_pd_entry_in_global_pt;
+	int i;
+	int ret = -ENOMEM;
+
+	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
+	 * entries. For aliasing ppgtt support we just steal them at the end for
+	 * now. */
+	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
+
+	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+	if (!ppgtt)
+		return ret;
+
+	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+    ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
+				  GFP_KERNEL);
+	if (!ppgtt->pt_pages)
+		goto err_ppgtt;
+
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+        ppgtt->pt_pages[i] = AllocPage();
+		if (!ppgtt->pt_pages[i])
+			goto err_pt_alloc;
+	}
+
+/*
+	if (dev_priv->mm.gtt->needs_dmar) {
+		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
+						*ppgtt->num_pd_entries,
+					     GFP_KERNEL);
+		if (!ppgtt->pt_dma_addr)
+			goto err_pt_alloc;
+
+		for (i = 0; i < ppgtt->num_pd_entries; i++) {
+			dma_addr_t pt_addr;
+
+			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+					       0, 4096,
+					       PCI_DMA_BIDIRECTIONAL);
+
+			if (pci_dma_mapping_error(dev->pdev,
+						  pt_addr)) {
+				ret = -EIO;
+				goto err_pd_pin;
+
+			}
+			ppgtt->pt_dma_addr[i] = pt_addr;
+		}
+	}
+*/
+	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
+
+	i915_ppgtt_clear_range(ppgtt, 0,
+			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+
+	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
+
+	dev_priv->mm.aliasing_ppgtt = ppgtt;
+
+	return 0;
+
+err_pd_pin:
+//   if (ppgtt->pt_dma_addr) {
+//       for (i--; i >= 0; i--)
+//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+//                      4096, PCI_DMA_BIDIRECTIONAL);
+//   }
+err_pt_alloc:
+//   kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		if (ppgtt->pt_pages[i])
+            FreePage(ppgtt->pt_pages[i]);
+	}
+	kfree(ppgtt->pt_pages);
+err_ppgtt:
+	kfree(ppgtt);
+
+	return ret;
+}
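As the comment in i915_gem_init_aliasing_ppgtt() notes, the PPGTT's page-directory entries are stolen from the tail of the global GTT, and pd_offset records where they start, in bytes. A worked example of that bookkeeping, assuming the 512*1024 global entries mentioned in the comment and the usual value of 512 for I915_PPGTT_PD_ENTRIES (both come from the driver headers, not this file):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gtt_total_entries = 512 * 1024; /* assumed, per the comment          */
	uint32_t pd_entries        = 512;        /* assumed I915_PPGTT_PD_ENTRIES     */
	uint32_t first_pd_entry    = gtt_total_entries - pd_entries;

	/* Mirrors ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(uint32_t). */
	printf("first PD entry: %u\n", first_pd_entry);        /* 523776        */
	printf("pd_offset: %u bytes\n",
	       first_pd_entry * (uint32_t)sizeof(uint32_t));   /* 2095104 bytes */
	return 0;
}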
+
+void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+	int i;
+
+	if (!ppgtt)
+		return;
+
+//   if (ppgtt->pt_dma_addr) {
+//       for (i = 0; i < ppgtt->num_pd_entries; i++)
+//           pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
+//                      4096, PCI_DMA_BIDIRECTIONAL);
+//   }
+
+//   kfree(ppgtt->pt_dma_addr);
+	for (i = 0; i < ppgtt->num_pd_entries; i++)
+        FreePage(ppgtt->pt_pages[i]);
+	kfree(ppgtt->pt_pages);
+	kfree(ppgtt);
+}
+
+static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
+                     const struct pagelist *pages,
+					 unsigned first_entry,
+					 uint32_t pte_flags)
+{
+	uint32_t *pt_vaddr, pte;
+	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
+	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+    unsigned i, j;
+	dma_addr_t page_addr;
+
+	i = 0;
+
+    pt_vaddr = AllocKernelSpace(4096);
+
+    if( pt_vaddr != NULL)
+    {
+        while (i < pages->nents)
+        {
+            MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
+
+            for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
+                page_addr = pages->page[i];
+                pte = GEN6_PTE_ADDR_ENCODE(page_addr);
+                pt_vaddr[j] = pte | pte_flags;
+            }
+
+            first_pte = 0;
+            act_pd++;
+        }
+        FreeKernelSpace(pt_vaddr);
+    };
+}
+
+void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
+			    struct drm_i915_gem_object *obj,
+			    enum i915_cache_level cache_level)
+{
+	uint32_t pte_flags = GEN6_PTE_VALID;
+
+	switch (cache_level) {
+	case I915_CACHE_LLC_MLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
+		break;
+	case I915_CACHE_LLC:
+		pte_flags |= GEN6_PTE_CACHE_LLC;
+		break;
+	case I915_CACHE_NONE:
+		if (IS_HASWELL(obj->base.dev))
+			pte_flags |= HSW_PTE_UNCACHED;
+		else
+			pte_flags |= GEN6_PTE_UNCACHED;
+		break;
+	default:
+		BUG();
+	}
+
+	i915_ppgtt_insert_sg_entries(ppgtt,
+                     &obj->pages,
+				     obj->gtt_space->start >> PAGE_SHIFT,
+				     pte_flags);
+}
+
+void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
+			      struct drm_i915_gem_object *obj)
+{
+	i915_ppgtt_clear_range(ppgtt,
+			       obj->gtt_space->start >> PAGE_SHIFT,
+			       obj->base.size >> PAGE_SHIFT);
+}
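The pair of functions above is the aliasing-PPGTT counterpart of the global-GTT bind/unbind paths later in this file. A hedged usage sketch (it only compiles inside the driver, since it relies on the i915_drv.h types; the helper name is illustrative):

/* Illustrative only: bind obj's pages into the aliasing PPGTT at the
 * object's current GTT offset and cache level, then remove them again. */
static void ppgtt_bind_unbind_example(struct drm_i915_private *dev_priv,
				      struct drm_i915_gem_object *obj)
{
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	i915_ppgtt_bind_object(ppgtt, obj, obj->cache_level);
	/* ... GPU work through the per-process address space ... */
	i915_ppgtt_unbind_object(ppgtt, obj);
}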
 
 /* XXX kill agp_type! */
 static unsigned int cache_level_to_agp_type(struct drm_device *dev,
 					    enum i915_cache_level cache_level)
 {
 	switch (cache_level) {
 	case I915_CACHE_LLC_MLC:
 		if (INTEL_INFO(dev)->gen >= 6)
 			return AGP_USER_CACHED_MEMORY_LLC_MLC;
 		/* Older chipsets do not have this extra level of CPU
 		 * cacheing, so fallthrough and request the PTE simply
 		 * as cached.
 		 */
 	case I915_CACHE_LLC:
 		return AGP_USER_CACHED_MEMORY;
 	default:
 	case I915_CACHE_NONE:
 		return AGP_USER_MEMORY;
 	}
 }
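Two of the AGP types returned above are the ones #defined at the top of this file (AGP_USER_CACHED_MEMORY_LLC_MLC comes from the GTT layer instead). A tiny standalone check of the values defined here:

#include <stdio.h>

/* Copied from the #defines at the top of this file. */
#define AGP_USER_TYPES          (1 << 16)
#define AGP_USER_MEMORY         (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)

int main(void)
{
	printf("AGP_USER_MEMORY        = 0x%x\n", AGP_USER_MEMORY);        /* 0x10000 */
	printf("AGP_USER_CACHED_MEMORY = 0x%x\n", AGP_USER_CACHED_MEMORY); /* 0x10001 */
	return 0;
}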
 
 static bool do_idling(struct drm_i915_private *dev_priv)
 {
 	bool ret = dev_priv->mm.interruptible;
 
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
 		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
 		}
 	}
 
 	return ret;
 }
 
 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 {
 	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
 		dev_priv->mm.interruptible = interruptible;
 }
 
 #if 0
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
 	/* First fill our portion of the GTT with scratch pages */
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
-		i915_gem_gtt_rebind_object(obj, obj->cache_level);
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	intel_gtt_chipset_flush();
 }
 #endif
 
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
-	int ret;
-
-//   if (dev_priv->mm.gtt->needs_dmar) {
-//       ret = intel_gtt_map_memory(obj->pages,
-//                      obj->base.size >> PAGE_SHIFT,
-//                      &obj->sg_list,
-//                      &obj->num_sg);
-//       if (ret != 0)
-//           return ret;
-
-//       intel_gtt_insert_sg_entries(obj->sg_list,
-//                       obj->num_sg,
-//                       obj->gtt_space->start >> PAGE_SHIFT,
-//                       agp_type);
-//   } else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
+	if (obj->has_dma_mapping)
+		return 0;
+
+//   if (!dma_map_sg(&obj->base.dev->pdev->dev,
+//           obj->pages->sgl, obj->pages->nents,
+//           PCI_DMA_BIDIRECTIONAL))
+//       return -ENOSPC;
 
 	return 0;
 }
 
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 				enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-//   if (dev_priv->mm.gtt->needs_dmar) {
-//       BUG_ON(!obj->sg_list);
-
-//       intel_gtt_insert_sg_entries(obj->sg_list,
-//                       obj->num_sg,
-//                       obj->gtt_space->start >> PAGE_SHIFT,
-//                       agp_type);
-//   } else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
+    intel_gtt_insert_sg_entries(&obj->pages,
+				    obj->gtt_space->start >> PAGE_SHIFT,
+				       agp_type);
+	obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
+	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+			      obj->base.size >> PAGE_SHIFT);
+
+	obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible;
 
 	interruptible = do_idling(dev_priv);
 
-	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
-			      obj->base.size >> PAGE_SHIFT);
-
-	if (obj->sg_list) {
-//       intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-	}
+//   if (!obj->has_dma_mapping)
+//       dma_unmap_sg(&dev->pdev->dev,
+//                obj->pages->sgl, obj->pages->nents,
+//                PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }
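With this revision the global-GTT path is split into four visible steps: i915_gem_gtt_prepare_object() (DMA mapping, effectively a no-op here), i915_gem_gtt_bind_object(), i915_gem_gtt_unbind_object() and i915_gem_gtt_finish_object(). A hedged sketch of the expected calling order (compiles only inside the driver and assumes obj->gtt_space has already been assigned by the caller; the helper name is illustrative):

/* Illustrative only. */
static int global_gtt_lifecycle_example(struct drm_i915_gem_object *obj)
{
	int ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		return ret;

	i915_gem_gtt_bind_object(obj, obj->cache_level);  /* write the GTT PTEs   */
	/* ... object is visible through the global GTT ... */
	i915_gem_gtt_unbind_object(obj);                  /* scrub the PTEs       */
	i915_gem_gtt_finish_object(obj);                  /* undo DMA bookkeeping */

	return 0;
}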
+
+static void i915_gtt_color_adjust(struct drm_mm_node *node,
+				  unsigned long color,
+				  unsigned long *start,
+				  unsigned long *end)
+{
+	if (node->color != color)
+		*start += 4096;
+
+	if (!list_empty(&node->node_list)) {
+		node = list_entry(node->node_list.next,
+				  struct drm_mm_node,
+				  node_list);
+		if (node->allocated && node->color != color)
+			*end -= 4096;
+	}
+}
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+			      unsigned long start,
+			      unsigned long mappable_end,
+			      unsigned long end)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Substract the guard page ... */
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+	if (!HAS_LLC(dev))
+		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
+	dev_priv->mm.gtt_total = end - start;
+	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+	/* ... but ensure that we clear the entire range. */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
+}
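i915_gem_init_global_gtt() hands the range [start, end) to the drm_mm allocator minus one trailing guard page, records the totals, and then scrubs every PTE in the range. A small standalone sketch of that bookkeeping with example numbers only (a 512 MiB GTT of which 256 MiB is CPU-mappable; the real values come from the hardware GTT probe):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Example values only. */
	unsigned long start        = 0;
	unsigned long end          = 512UL << 20;  /* 512 MiB of GTT space */
	unsigned long mappable_end = 256UL << 20;  /* 256 MiB CPU aperture */

	unsigned long mm_size   = end - start - PAGE_SIZE;  /* guard page removed */
	unsigned long gtt_total = end - start;
	unsigned long mappable  = (end < mappable_end ? end : mappable_end) - start;

	printf("drm_mm size  : %lu bytes (%lu pages)\n", mm_size, mm_size / PAGE_SIZE);
	printf("gtt_total    : %lu bytes\n", gtt_total);
	printf("mappable     : %lu bytes\n", mappable);
	printf("cleared range: %lu pages\n", (end - start) / PAGE_SIZE);
	return 0;
}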