/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (ie. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

/* gen6-hsw has bits 11:4 for physical addr bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
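
/* Worked example (illustrative, not part of the original header): for a
 * page-aligned physical address above 4GiB such as 0x2_3456_7000, bits 39:32
 * are 0x02. (addr >> 28) moves them down to bits 11:4 and the 0xff0 mask
 * keeps only that field, so GEN6_GTT_ADDR_ENCODE(0x234567000ULL) yields
 * 0x234567020: the low address bits stay put and the high bits are folded
 * into PTE bits 11:4, as the comment above describes.
 */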

#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
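
/* Worked example (illustrative, not part of the original header):
 * HSW_CACHEABILITY_CONTROL(0xb), as used by HSW_WB_ELLC_LLC_AGE0, splits the
 * 4-bit index 0b1011: the low three bits (0b011) shift into PTE bits 3:1,
 * giving 0x006, and the top bit (0x8) shifts by (11 - 3) into PTE bit 11,
 * giving 0x800, so the macro expands to 0x806.
 */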

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is that the
 * PDPEs are programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on
 * 32b page tables. */
#define GEN8_PDPE_MASK			0x1ff
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev)		(USES_FULL_48BIT_PPGTT(dev) ? \
					 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
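
/* Worked example (illustrative, not part of the original header): under the
 * 48b layout above, the GPU virtual address 0x18140E09000 decomposes as
 * bits 47:39 = 3 (PML4E), 38:30 = 5 (PDPE), 29:21 = 7 (PDE), 20:12 = 9 (PTE)
 * and 11:0 = 0 (page offset), which is exactly what the gen8_*_index()
 * helpers further down return for it. With 4KiB pages and 8-byte entries,
 * each level holds 512 entries (GEN8_PTES == 512).
 */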

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	unsigned int height;
	unsigned int pitch;
	unsigned int uv_offset;
	uint32_t pixel_format;
	uint64_t fb_modifier;
	unsigned int width_pages, height_pages;
	uint64_t size;
	unsigned int width_pages_uv, height_pages_uv;
	uint64_t size_uv;
	unsigned int uv_start_page;
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotation_info;
	} params;

	struct sg_table *pages;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** Flags and address space this VMA is bound to */
#define GLOBAL_BIND	(1<<0)
#define LOCAL_BIND	(1<<1)
	unsigned int bound : 4;

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also assumed in GEM
	 * functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, execbuffer
	 * (objects are not allowed multiple times for the same batchbuffer),
	 * and the framebuffer code. When switching/pageflipping, the
	 * framebuffer code has at most two buffers pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
};

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_scratch {
	struct i915_page_dma base;
};

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	u64 start;		/* Start offset always 0 for dri2 */
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct i915_page_scratch *scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;

	size_t stolen_size;		/* Total size of stolen memory */
	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
	u64 mappable_end;		/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 u64 *mappable_end);
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	struct drm_i915_file_private *file_priv;

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/* Iterates over every pde between start and start + length. If start and
 * start + length are not perfectly divisible, the macro will round down and
 * up as needed. The macro modifies pde, start, and length. Dev is only used
 * to differentiate shift values. Temp is temp.  On gen6/7, start = 0 and
 * length = 2G effectively iterates over every PDE in the system.
 *
 * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
 */
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES ? \
			(pt = (pd)->page_table[iter]), 1 : 0; \
	     iter++, \
	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
	     temp = min_t(unsigned, temp, length), \
	     start += temp, length -= temp)
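
/* Illustrative usage sketch (not part of the original header): a caller that
 * wants to visit every page table backing a GPU VA range would typically
 * write something like
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde, temp;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
 *		(use pt, the page table covering this slice; the macro
 *		 advances start/length to the next PDE boundary each pass)
 *	}
 *
 * Each GEN6 PDE covers 1 << GEN6_PDE_SHIFT bytes (4MiB) of GPU VA space.
 */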

#define gen6_for_all_pdes(pt, ppgtt, iter)  \
	for (iter = 0;		\
	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES;	\
	     iter++)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1 << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
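
/* Worked example (illustrative, not part of the original header): with
 * GEN6_PDE_SHIFT (22) a page table spans 4MiB and holds NUM_PTE(22) == 1024
 * PTEs. i915_pte_count(0x1000, 0x3000, 22) returns 3, while
 * i915_pte_count(0x3ff000, 0x3000, 22) returns 1, because that range crosses
 * a 4MiB boundary and the count is clamped to what is left of the first
 * page table.
 */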

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde between start and
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start); \
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start); \
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) &&		\
		(pd = (pdp)->page_directory[iter], true);		\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);	\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = (pml4)->pdps[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)
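
/* Illustrative usage sketch (not part of the original header): a single-level
 * walk over the page directories selected by a VA range looks like
 *
 *	struct i915_page_directory *pd;
 *	uint32_t pdpe;
 *
 *	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 *		(use pd, the directory covering this 1GiB slice; the macro
 *		 advances start/length to the next PDPE boundary)
 *	}
 *
 * Note that the macro body references I915_PDPES_PER_PDP(dev), so a variable
 * named dev must be in scope at the call site, and that start and length are
 * modified, so pass copies if the original values are still needed.
 */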
6084 serge 481
 
482
static inline uint32_t gen8_pte_index(uint64_t address)
483
{
484
	return i915_pte_index(address, GEN8_PDE_SHIFT);
485
}
486
 
487
static inline uint32_t gen8_pde_index(uint64_t address)
488
{
489
	return i915_pde_index(address, GEN8_PDE_SHIFT);
490
}
491
 
492
static inline uint32_t gen8_pdpe_index(uint64_t address)
493
{
494
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
495
}
496
 
497
static inline uint32_t gen8_pml4e_index(uint64_t address)
498
{
499
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
500
}
501
 
502
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
503
{
504
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
505
}
506
 
507
static inline dma_addr_t
508
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
509
{
510
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
511
		px_dma(ppgtt->pdp.page_directory[n]) :
512
		px_dma(ppgtt->base.scratch_pd);
513
}
514
 
5060 serge 515
int i915_gem_gtt_init(struct drm_device *dev);
516
void i915_gem_init_global_gtt(struct drm_device *dev);
5354 serge 517
void i915_global_gtt_cleanup(struct drm_device *dev);
5060 serge 518
 
519
 
5354 serge 520
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
521
int i915_ppgtt_init_hw(struct drm_device *dev);
6084 serge 522
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
5354 serge 523
void i915_ppgtt_release(struct kref *kref);
524
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
525
					struct drm_i915_file_private *fpriv);
526
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
527
{
528
	if (ppgtt)
529
		kref_get(&ppgtt->ref);
530
}
531
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
532
{
533
	if (ppgtt)
534
		kref_put(&ppgtt->ref, i915_ppgtt_release);
535
}
536
 
5060 serge 537
void i915_check_and_clear_faults(struct drm_device *dev);
538
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
539
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
540
 
541
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
542
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
543
 
6084 serge 544
static inline bool
545
i915_ggtt_view_equal(const struct i915_ggtt_view *a,
546
                     const struct i915_ggtt_view *b)
547
{
548
	if (WARN_ON(!a || !b))
549
		return false;
550
 
551
	if (a->type != b->type)
552
		return false;
6937 serge 553
	if (a->type != I915_GGTT_VIEW_NORMAL)
6084 serge 554
		return !memcmp(&a->params, &b->params, sizeof(a->params));
555
	return true;
556
}
557
 
558
size_t
559
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
560
		    const struct i915_ggtt_view *view);
561
 
5060 serge 562
#endif