Subversion Repositories Kolibri OS


Diff of i915_gem_gtt.h, Rev 5354 -> Rev 6084. Lines prefixed "-" exist only in Rev 5354, lines prefixed "+" only in Rev 6084; each "Line X... Line Y..." marker gives the Rev 5354 / Rev 6084 line numbers where a hunk resumes after an elided unchanged run.
Line 34... Line 34...
 #ifndef __I915_GEM_GTT_H__
 #define __I915_GEM_GTT_H__
 
 struct drm_i915_file_private;
 
-typedef uint32_t gen6_gtt_pte_t;
-typedef uint64_t gen8_gtt_pte_t;
-typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
+typedef uint32_t gen6_pte_t;
+typedef uint64_t gen8_pte_t;
+typedef uint64_t gen8_pde_t;
+typedef uint64_t gen8_ppgtt_pdpe_t;
+typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-#define I915_PPGTT_PT_ENTRIES		(PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
 #define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
 #define GEN6_PTE_CACHE_LLC		(2 << 1)
 #define GEN6_PTE_UNCACHED		(1 << 1)
 #define GEN6_PTE_VALID			(1 << 0)
+
+#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
+#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
+#define I915_PDES			512
+#define I915_PDE_MASK			(I915_PDES - 1)
+#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))
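The encode macros above pack a wide physical address into a 32-bit PTE: address bits 39:32 are folded into PTE bits 11:4, which are otherwise free once the low 12 bits of a page-aligned address carry flags. A standalone sketch of the arithmetic (the macros are copied from the header; the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Copied from the header above. */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_VALID			(1 << 0)

int main(void)
{
	uint64_t phys = 0x1234567000ULL;	/* arbitrary page-aligned 40-bit address */
	uint32_t pte = (uint32_t)GEN6_GTT_ADDR_ENCODE(phys) | GEN6_PTE_VALID;

	/* address bits 39:32 (0x12) land in PTE bits 11:4 */
	printf("pte = 0x%08x\n", (unsigned)pte);	/* prints pte = 0x34567121 */
	return 0;
}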
 
-#define GEN6_PPGTT_PD_ENTRIES		512
+#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
+#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
Line 79... Line 88...
 /* GEN8 legacy style address is defined as a 3 level page table:
  * 31:30 | 29:21 | 20:12 |  11:0
  * PDPE  |  PDE  |  PTE  | offset
  * The difference as compared to normal x86 3 level page table is the PDPEs are
  * programmed via register.
+ *
+ * GEN8 48b legacy style address is defined as a 4 level page table:
+ * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
+ * PML4E | PDPE  |  PDE  |  PTE  | offset
  */
+#define GEN8_PML4ES_PER_PML4		512
+#define GEN8_PML4E_SHIFT		39
+#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
 #define GEN8_PDPE_SHIFT			30
-#define GEN8_PDPE_MASK			0x3
+/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
+ * tables */
+#define GEN8_PDPE_MASK			0x1ff
 #define GEN8_PDE_SHIFT			21
 #define GEN8_PDE_MASK			0x1ff
 #define GEN8_PTE_SHIFT			12
 #define GEN8_PTE_MASK			0x1ff
-#define GEN8_LEGACY_PDPS		4
-#define GEN8_PTES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_gtt_pte_t))
-#define GEN8_PDES_PER_PAGE		(PAGE_SIZE / sizeof(gen8_ppgtt_pde_t))
+#define GEN8_LEGACY_PDPES		4
+#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
+
+#define I915_PDPES_PER_PDP(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
+				 GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
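Each level of the 48b layout is indexed by nine bits of the virtual address (512 entries per level), so the shift/mask pairs above fully decompose an address. A standalone sketch using the values just defined (the sample address is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Copied from the header above. */
#define GEN8_PML4E_SHIFT	39
#define GEN8_PDPE_SHIFT		30
#define GEN8_PDE_SHIFT		21
#define GEN8_PTE_SHIFT		12
#define GEN8_PML4E_MASK		0x1ff
#define GEN8_PDPE_MASK		0x1ff
#define GEN8_PDE_MASK		0x1ff
#define GEN8_PTE_MASK		0x1ff

int main(void)
{
	uint64_t va = 0x123456789abcULL;	/* arbitrary 48-bit GPU virtual address */

	printf("pml4e=%u pdpe=%u pde=%u pte=%u offset=0x%x\n",
	       (unsigned)((va >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK),
	       (unsigned)((va >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK),
	       (unsigned)((va >> GEN8_PDE_SHIFT) & GEN8_PDE_MASK),
	       (unsigned)((va >> GEN8_PTE_SHIFT) & GEN8_PTE_MASK),
	       (unsigned)(va & 0xfff));
	return 0;
}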
Line 94... Line 114...
 
 #define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
 #define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
 #define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
Line 107... Line 127...
 #define GEN8_PPAT_WC			(1<<0)
 #define GEN8_PPAT_UC			(0<<0)
 #define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
 #define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
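GEN8_PPAT(i, x) simply places one 8-bit PPAT entry at byte i of the 64-bit register value, so a full table is the OR of eight such terms. A small worked example using only the entries shown here (illustrative only, not the driver's actual PPAT programming):

/* GEN8_PPAT(0, GEN8_PPAT_WC) | GEN8_PPAT(1, GEN8_PPAT_UC)
 *   = ((uint64_t)(1<<0) << 0) | ((uint64_t)(0<<0) << 8)
 *   = 0x0000000000000001
 * Entry i always occupies bits [8*i+7 : 8*i] of the register value. */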
Line -... Line 131...
+
+enum i915_ggtt_view_type {
+	I915_GGTT_VIEW_NORMAL = 0,
+	I915_GGTT_VIEW_ROTATED,
+	I915_GGTT_VIEW_PARTIAL,
+};
+
+struct intel_rotation_info {
+	unsigned int height;
+	unsigned int pitch;
+	unsigned int uv_offset;
+	uint32_t pixel_format;
+	uint64_t fb_modifier;
+	unsigned int width_pages, height_pages;
+	uint64_t size;
+	unsigned int width_pages_uv, height_pages_uv;
+	uint64_t size_uv;
+	unsigned int uv_start_page;
+};
+
+struct i915_ggtt_view {
+	enum i915_ggtt_view_type type;
+
+	union {
+		struct {
+			u64 offset;
+			unsigned int size;
+		} partial;
+	} params;
+
+	struct sg_table *pages;
+
+	union {
+		struct intel_rotation_info rotation_info;
+	};
+};
+
+extern const struct i915_ggtt_view i915_ggtt_view_normal;
+extern const struct i915_ggtt_view i915_ggtt_view_rotated;
 
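Of the three view types, only I915_GGTT_VIEW_PARTIAL carries parameters: the params.partial offset/size pair selects which slice of the object gets bound. A minimal sketch of filling one in (the numbers are made up, and page-granularity units are an assumption based on how the fault path appears to use these fields):

	/* Hypothetical partial view: map a 32-page window starting at
	 * page 16 of the object. Only params.partial matters here. */
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
	};

	view.params.partial.offset = 16;	/* start, in pages (assumed) */
	view.params.partial.size = 32;		/* length, in pages (assumed) */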
 enum i915_cache_level;
+
 /**
  * A VMA represents a GEM BO that is bound into an address space. Therefore, a
  * VMA's presence cannot be guaranteed before binding, or after unbinding the
  * object into/from the address space.
Line 124... Line 184...
 	struct i915_address_space *vm;
 
 	/** Flags and address space this VMA is bound to */
 #define GLOBAL_BIND	(1<<0)
 #define LOCAL_BIND	(1<<1)
-#define PTE_READ_ONLY	(1<<2)
 	unsigned int bound : 4;
+
+	/**
+	 * Support different GGTT views into the same object.
+	 * This means there can be multiple VMA mappings per object and per VM.
+	 * i915_ggtt_view_type is used to distinguish between those entries.
+	 * The default one of zero (I915_GGTT_VIEW_NORMAL) is default and also
+	 * assumed in GEM functions which take no ggtt view parameter.
+	 */
+	struct i915_ggtt_view ggtt_view;
 
 	/** This object's place on the active/inactive lists */
Line 144... Line 212...
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
 
 	/**
 	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
 	 *
 	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 	 * bits with absolutely no headroom. So use 4 bits. */
 	unsigned int pin_count:4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
-	/** Unmap an object from an address space. This usually consists of
-	 * setting the valid PTE entries to a reserved scratch page. */
-	void (*unbind_vma)(struct i915_vma *vma);
-	/* Map an object into an address space with the given cache flags. */
-	void (*bind_vma)(struct i915_vma *vma,
-			 enum i915_cache_level cache_level,
-			 u32 flags);
 };
 
+struct i915_page_dma {
+	struct page *page;
+	union {
+		dma_addr_t daddr;
+
+		/* For gen6/gen7 only. This is the offset in the GGTT
+		 * where the page directory entries for PPGTT begin
+		 */
+		uint32_t ggtt_offset;
+	};
+};
+
+#define px_base(px) (&(px)->base)
+#define px_page(px) (px_base(px)->page)
+#define px_dma(px) (px_base(px)->daddr)
+
+struct i915_page_scratch {
+	struct i915_page_dma base;
+};
+
+struct i915_page_table {
+	struct i915_page_dma base;
+
+	unsigned long *used_ptes;
+};
+
+struct i915_page_directory {
+	struct i915_page_dma base;
+
+	unsigned long *used_pdes;
+	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
+};
+
+struct i915_page_directory_pointer {
+	struct i915_page_dma base;
+
+	unsigned long *used_pdpes;
+	struct i915_page_directory **page_directory;
+};
+
+struct i915_pml4 {
+	struct i915_page_dma base;
+
+	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
+	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
+};
 
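Every level of this new hierarchy embeds a struct i915_page_dma member named base, which is what lets the px_*() accessor macros above work uniformly on all of them. A short sketch of what the macros expand to (pt stands for any already-allocated level, e.g. a struct i915_page_table *):

/* px_dma(pt)  -> (&(pt)->base)->daddr  : DMA address of the backing page
 * px_page(pt) -> (&(pt)->base)->page   : the backing struct page itself
 * The same expressions work for i915_page_directory,
 * i915_page_directory_pointer and i915_pml4, because each embeds
 * 'struct i915_page_dma base'. */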
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
 	struct list_head global_link;
-	unsigned long start;		/* Start offset always 0 for dri2 */
-	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */
+	u64 start;		/* Start offset always 0 for dri2 */
+	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	struct {
-		dma_addr_t addr;
-		struct page *page;
-	} scratch;
+	struct i915_page_scratch *scratch_page;
+	struct i915_page_table *scratch_pt;
+	struct i915_page_directory *scratch_pd;
+	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */
 
 	/**
 	 * List of objects currently involved in rendering.
 	 *
 	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * flushed, not necessarily primitives. last_read_req
 	 * represents when the rendering involved will be completed.
 	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
 
 	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
+	 * last_read_req is NULL while an object is in this list.
 	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
 	 * freed, and we'll pull it off the list in the free path.
 	 */
 	struct list_head inactive_list;
 
 	/* FIXME: Need a more generic return type */
-	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-				     enum i915_cache_level level,
-				     bool valid, u32 flags); /* Create a valid PTE */
+	gen6_pte_t (*pte_encode)(dma_addr_t addr,
+				 enum i915_cache_level level,
+				 bool valid, u32 flags); /* Create a valid PTE */
+	/* flags for pte_encode */
+#define PTE_READ_ONLY	(1<<0)
+	int (*allocate_va_range)(struct i915_address_space *vm,
+				 uint64_t start,
+				 uint64_t length);
 	void (*clear_range)(struct i915_address_space *vm,
 			    uint64_t start,
 			    uint64_t length,
 			    bool use_scratch);
 	void (*insert_entries)(struct i915_address_space *vm,
 			       struct sg_table *st,
 			       uint64_t start,
 			       enum i915_cache_level cache_level, u32 flags);
 	void (*cleanup)(struct i915_address_space *vm);
+	/** Unmap an object from an address space. This usually consists of
+	 * setting the valid PTE entries to a reserved scratch page. */
+	void (*unbind_vma)(struct i915_vma *vma);
+	/* Map an object into an address space with the given cache flags. */
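The vfuncs compose in the expected way: binding writes one PTE per backing page through insert_entries(), and unbinding points the same range back at the scratch page through clear_range(). A hedged sketch of the calling pattern (modeled loosely on the GGTT bind/unbind paths; vm, vma, cache_level and flags are assumed inputs, not declarations from this header):

	/* Bind: write a PTE per page at the VMA's GTT offset. */
	vm->insert_entries(vm, vma->ggtt_view.pages, vma->node.start,
			   cache_level, flags);

	/* Unbind: redirect the same range to scratch (use_scratch = true). */
	vm->clear_range(vm, vma->node.start, vma->node.size, true);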
Line 223... Line 341...
  * and correct (in cases like swizzling). That region is referred to as GMADR in
  * the spec.
  */
 struct i915_gtt {
 	struct i915_address_space base;
-	size_t stolen_size;		/* Total size of stolen memory */
 
-	unsigned long mappable_end;	/* End offset that we can CPU map */
+	size_t stolen_size;		/* Total size of stolen memory */
+	size_t stolen_usable_size;	/* Total size minus BIOS reserved */
+	u64 mappable_end;		/* End offset that we can CPU map */
 	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
 	phys_addr_t mappable_base;	/* PA of our GMADR */
 
Line 237... Line 356...
 	bool do_idle_maps;
 
 	int mtrr;
 
 	/* global gtt ops */
-	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+	int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
 			  size_t *stolen, phys_addr_t *mappable_base,
-			  unsigned long *mappable_end);
+			  u64 *mappable_end);
 };
 
 struct i915_hw_ppgtt {
 	struct i915_address_space base;
 	struct kref ref;
 	struct drm_mm_node node;
-	unsigned num_pd_entries;
-	unsigned num_pd_pages; /* gen8+ */
-	union {
-		struct page **pt_pages;
-		struct page **gen8_pt_pages[GEN8_LEGACY_PDPS];
-	};
-	struct page *pd_pages;
-	union {
-		uint32_t pd_offset;
-		dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS];
-	};
+	unsigned long pd_dirty_rings;
 	union {
-		dma_addr_t *pt_dma_addr;
-		dma_addr_t *gen8_pt_dma_addr[4];
+		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
+		struct i915_page_directory_pointer pdp;	/* GEN8+ */
+		struct i915_page_directory pd;		/* GEN6-7 */
 	};
 
 	struct drm_i915_file_private *file_priv;
 
+	gen6_pte_t __iomem *pd_addr;
+
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
 	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-			 struct intel_engine_cs *ring);
-//   void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+			 struct drm_i915_gem_request *req);
+	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
+
+/* For each pde iterates over every pde between from start until start + length.
+ * If start, and start+length are not perfectly divisible, the macro will round
+ * down, and up as needed. The macro modifies pde, start, and length. Dev is
+ * only used to differentiate shift values. Temp is temp.  On gen6/7, start = 0,
+ * and length = 2G effectively iterates over every PDE in the system.
+ *
+ * XXX: temp is not actually needed, but it saves doing the ALIGN operation.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+	for (iter = gen6_pde_index(start); \
+	     length > 0 && iter < I915_PDES ? \
+			(pt = (pd)->page_table[iter]), 1 : 0; \
+	     iter++, \
+	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+	     temp = min_t(unsigned, temp, length), \
+	     start += temp, length -= temp)
+
+#define gen6_for_all_pdes(pt, ppgtt, iter)  \
+	for (iter = 0;		\
+	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES;	\
+	     iter++)
+
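A hedged sketch of how the iterator is used (modeled on the PPGTT va-range allocation path; ppgtt, start and length are assumed inputs, and the gen6_* helpers it calls are defined just below). Within the body, pt is the table behind the current PDE, start is the current position and length the bytes remaining; gen6_pte_count() clips the per-table count at the table boundary:

	struct i915_page_table *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		/* mark this chunk's PTEs as allocated in the table's bitmap */
		bitmap_set(pt->used_ptes, gen6_pte_index(start),
			   gen6_pte_count(start, length));
	}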
 
-
 
409
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
-
 
410
{
-
 
411
	const uint32_t mask = NUM_PTE(pde_shift) - 1;
-
 
412
 
-
 
413
	return (address >> PAGE_SHIFT) & mask;
-
 
414
}
-
 
415
 
-
 
416
/* Helper to counts the number of PTEs within the given length. This count
-
 
417
 * does not cross a page table boundary, so the max value would be
-
 
418
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
-
 
419
*/
-
 
420
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
-
 
421
				      uint32_t pde_shift)
-
 
422
{
-
 
423
	const uint64_t mask = ~((1 << pde_shift) - 1);
-
 
424
	uint64_t end;
-
 
425
 
-
 
426
	WARN_ON(length == 0);
-
 
427
	WARN_ON(offset_in_page(addr|length));
-
 
428
 
-
 
429
	end = addr + length;
-
 
430
 
-
 
431
	if ((addr & mask) != (end & mask))
-
 
432
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);
-
 
433
 
-
 
434
	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
-
 
435
}
-
 
436
 
-
 
437
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
-
 
438
{
-
 
439
	return (addr >> shift) & I915_PDE_MASK;
-
 
440
}
-
 
441
 
-
 
442
static inline uint32_t gen6_pte_index(uint32_t addr)
-
 
443
{
-
 
444
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
-
 
445
}
-
 
446
 
-
 
447
static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
-
 
448
{
-
 
449
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
-
 
450
}
-
 
451
 
-
 
452
static inline uint32_t gen6_pde_index(uint32_t addr)
-
 
453
{
-
 
454
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
-
 
455
}
-
 
456
 
-
 
457
/* Equivalent to the gen6 version, For each pde iterates over every pde
-
 
458
 * between from start until start + length. On gen8+ it simply iterates
-
 
459
 * over every page directory entry in a page directory.
-
 
460
 */
-
 
461
#define gen8_for_each_pde(pt, pd, start, length, temp, iter)		\
-
 
462
	for (iter = gen8_pde_index(start); \
-
 
463
	     length > 0 && iter < I915_PDES ? \
-
 
464
			(pt = (pd)->page_table[iter]), 1 : 0; \
-
 
465
	     iter++,				\
-
 
466
	     temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start,	\
-
 
467
	     temp = min(temp, length),					\
-
 
468
	     start += temp, length -= temp)
-
 
469
 
-
 
470
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter)	\
-
 
471
	for (iter = gen8_pdpe_index(start); \
-
 
472
	     length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
-
 
473
			(pd = (pdp)->page_directory[iter]), 1 : 0; \
-
 
474
	     iter++,				\
-
 
475
	     temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start,	\
-
 
476
	     temp = min(temp, length),					\
-
 
477
	     start += temp, length -= temp)
-
 
478
 
-
 
479
#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter)	\
-
 
480
	for (iter = gen8_pml4e_index(start);	\
-
 
481
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
-
 
482
			(pdp = (pml4)->pdps[iter]), 1 : 0; \
-
 
483
	     iter++,				\
-
 
484
	     temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start,	\
-
 
485
	     temp = min(temp, length),					\
-
 
486
	     start += temp, length -= temp)
-
 
487
 
-
 
488
static inline uint32_t gen8_pte_index(uint64_t address)
-
 
489
{
-
 
490
	return i915_pte_index(address, GEN8_PDE_SHIFT);
-
 
491
}
-
 
492
 
-
 
493
static inline uint32_t gen8_pde_index(uint64_t address)
-
 
494
{
-
 
495
	return i915_pde_index(address, GEN8_PDE_SHIFT);
-
 
496
}
-
 
497
 
-
 
498
static inline uint32_t gen8_pdpe_index(uint64_t address)
-
 
499
{
-
 
500
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
-
 
501
}
-
 
502
 
-
 
503
static inline uint32_t gen8_pml4e_index(uint64_t address)
-
 
504
{
-
 
505
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
-
 
506
}
-
 
507
 
-
 
508
static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
-
 
509
{
-
 
510
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
-
 
511
}
-
 
512
 
-
 
513
static inline dma_addr_t
-
 
514
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
269
	int (*enable)(struct i915_hw_ppgtt *ppgtt);
515
{
270
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
516
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
271
			 struct intel_engine_cs *ring);
517
		px_dma(ppgtt->pdp.page_directory[n]) :
Line 272... Line 518...
272
//   void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
518
		px_dma(ppgtt->base.scratch_pd);
273
};
519
}
-
 
520
 
274
 
521
int i915_gem_gtt_init(struct drm_device *dev);
275
int i915_gem_gtt_init(struct drm_device *dev);
522
void i915_gem_init_global_gtt(struct drm_device *dev);
276
void i915_gem_init_global_gtt(struct drm_device *dev);
523
void i915_global_gtt_cleanup(struct drm_device *dev);
277
void i915_global_gtt_cleanup(struct drm_device *dev);
524
 
278
 
525
 
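Taken together, the index helpers give the full path from a GPU virtual address down to a PTE slot in the 48b layout. A hedged sketch of the shape of such a lookup (lookup_pt is a hypothetical name; the driver's real page-table walks live in i915_gem_gtt.c):

static inline struct i915_page_table *
lookup_pt(struct i915_hw_ppgtt *ppgtt, uint64_t addr)
{
	struct i915_page_directory_pointer *pdp =
		ppgtt->pml4.pdps[gen8_pml4e_index(addr)];
	struct i915_page_directory *pd =
		pdp->page_directory[gen8_pdpe_index(addr)];

	/* the PTE slot within the returned table is gen8_pte_index(addr) */
	return pd->page_table[gen8_pde_index(addr)];
}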
Line 298... Line 545...
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+
+static inline bool
+i915_ggtt_view_equal(const struct i915_ggtt_view *a,
+                     const struct i915_ggtt_view *b)
+{
+	if (WARN_ON(!a || !b))
+		return false;
+
+	if (a->type != b->type)
+		return false;
+	if (a->type == I915_GGTT_VIEW_PARTIAL)
+		return !memcmp(&a->params, &b->params, sizeof(a->params));
+	return true;
+}
+
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+		    const struct i915_ggtt_view *view);