#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/pgtable_types.h>

#include 

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

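/*
 * Example (illustrative sketch, not from the original header): a driver
 * mapping MMIO registers would mark its protection UC- before creating
 * the mapping:
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * On CPU family > 3 this ORs in the UC- cache mode; on a 386 there is
 * no useful cache control to apply, so the protection passes through
 * unchanged.
 */
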
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

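/*
 * Illustrative sketch (not from the original header): a read fault on
 * untouched anonymous memory can be satisfied by mapping the shared
 * zero page read-only instead of allocating a fresh page:
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *
 * Note that the vaddr argument is ignored on x86: there is one global
 * zero page, not one per cache colour.
 */
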
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

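/*
 * Illustrative sketch: since the queries above are only defined for
 * present ptes, callers are expected to test presence first:
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		...	// page has been written to
 *
 * For a swap or migration entry the same bit positions carry entirely
 * different information, so pte_dirty() on such an entry is garbage.
 */
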
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

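/*
 * Illustrative sketch: round-tripping between an entry, its page frame
 * number, its struct page and its physical address:
 *
 *	unsigned long pfn  = pte_pfn(pte);
 *	struct page  *page = pte_page(pte);
 *	phys_addr_t   phys = (phys_addr_t)pfn << PAGE_SHIFT;
 *
 * PTE_PFN_MASK strips the flag bits before the shift; pmd_pfn_mask()
 * and pud_pfn_mask() select a wider mask for large pages, where the
 * low bits are used differently.
 */
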
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

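/*
 * Illustrative sketch: a huge-page walker telling a transparent huge
 * pmd apart from a device (ZONE_DEVICE) one:
 *
 *	if (pmd_trans_huge(*pmd))
 *		...	// THP: may be split
 *	else if (pmd_devmap(*pmd))
 *		...	// devmap: never split
 *
 * pmd_trans_huge() tests both bits precisely so that a devmap pmd,
 * which also carries _PAGE_PSE, is not mistaken for a THP.
 */
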
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

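/*
 * Illustrative sketch: these helpers are pure functions on the pte
 * value and compose freely, e.g. making an entry writable and marking
 * it accessed in one go:
 *
 *	pte = pte_mkyoung(pte_mkwrite(pte));
 *
 * Nothing touches the page table here; the result still has to be
 * installed with set_pte_at() so that paravirt hooks see the update.
 */
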
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

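/*
 * Illustrative sketch: on a CPU without NX support, __supported_pte_mask
 * has _PAGE_NX clear, so
 *
 *	massage_pgprot(__pgprot(_PAGE_PRESENT | _PAGE_NX))
 *
 * silently drops the NX bit, while a non-present protection passes
 * through untouched because its bits may encode something else
 * entirely (a swap entry, for instance).
 */
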
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

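/*
 * Illustrative sketch: mprotect()-style permission changes go through
 * pte_modify() so the pfn and the accessed/dirty state survive while
 * the protection bits are swapped out:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * _PAGE_CHG_MASK covers the bits that must be preserved (the pfn, the
 * PWT/PCD cache bits, special, accessed, dirty and soft-dirty);
 * everything outside it comes from the new protection.
 */
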
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

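/*
 * Illustrative sketch: when a caller asks for a write-combined mapping
 * of a range that PAT tracking already resolves to write-back, the
 * combination is rejected:
 *
 *	if (!is_new_memtype_allowed(paddr, size,
 *				    _PAGE_CACHE_MODE_WC,
 *				    _PAGE_CACHE_MODE_WB))
 *		return -EINVAL;	// conflicting cache attributes
 *
 * Allowing both would alias the same physical memory with incompatible
 * cache semantics.
 */
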
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
//#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

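/*
 * Illustrative sketch: _PAGE_PROTNONE counts as "present" here because
 * NUMA balancing deliberately clears _PAGE_PRESENT while keeping the
 * pfn intact:
 *
 *	pte = pte_modify(pte, PAGE_NONE);	// provoke a hinting fault
 *	...
 *	if (pte_present(pte))			// still true: PROTNONE is set
 *		...
 *
 * Treating such an entry as non-present would make the fault path
 * believe the page had been unmapped.
 */
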
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)		\
	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

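/*
 * Illustrative sketch (x86-64 values): with PMD_SHIFT == 21,
 * PAGE_SHIFT == 12 and 512 entries per table,
 *
 *	pmd_index(addr) == (addr >> 21) & 511;	// which 2M slot
 *	pte_index(addr) == (addr >> 12) & 511;	// which 4K slot
 *
 * i.e. each level of the walk consumes 9 bits of the virtual address.
 */
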
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

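/*
 * Illustrative sketch: with 4K pages (PAGE_SHIFT == 12) this is
 * npg >> 8, so pages_to_mb(262144) == 1024, i.e. 256K pages == 1 GiB.
 */
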
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		\
	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* To find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

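/*
 * Illustrative sketch: walking the kernel page tables down to the pte
 * mapping a kernel virtual address, validating each level:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	if (pgd_none(*pgd) || pgd_bad(*pgd))
 *		return NULL;
 *	pud = pud_offset(pgd, addr);
 *	if (pud_none(*pud) || pud_large(*pud))
 *		return NULL;
 *	pmd = pmd_offset(pud, addr);
 *	if (pmd_none(*pmd) || pmd_large(*pmd))
 *		return NULL;
 *	return pte_offset_kernel(pmd, addr);
 *
 * A real walker would handle the large-page cases rather than bail out;
 * the point is that each *_offset() step indexes the table whose base
 * address comes from the entry one level up.
 */
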

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
       memcpy(dst, src, count * sizeof(pgd_t));
}

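/*
 * Illustrative sketch: pgd setup for a new address space copies the
 * kernel's entries so that every mm shares the kernel half:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * Being a plain memcpy(), this relies on the caller holding pgd_lock or
 * on the destination pgd not yet being visible to anyone else.
 */
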
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

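/*
 * Illustrative sketch (x86-64): PTE_SHIFT is ilog2(512) == 9, so for
 * PG_LEVEL_2M (level 2):
 *
 *	page_level_shift(PG_LEVEL_2M) == (12 - 9) + 2 * 9 == 21
 *	page_level_size(PG_LEVEL_2M)  == 1UL << 21 == 2M
 *	page_level_mask(PG_LEVEL_2M)  == ~(2M - 1)
 *
 * which is exactly the alignment arithmetic for a 2M mapping.
 */
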
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

//#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */