#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                          \
        ((boot_cpu_data.x86 > 3)                                        \
         ? (__pgprot(pgprot_val(prot) |                                 \
                     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))     \
         : (prot))
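
/*
 * Usage sketch (illustrative, not part of this header): a driver that
 * needs an uncached user mapping can pass vm_page_prot through
 * pgprot_noncached() before remapping; names follow the usual
 * struct vm_area_struct layout:
 *
 *         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *         io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *                            vma->vm_page_prot);
 */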

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx() do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
        __visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
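
/*
 * Example (a sketch of typical use, not defined here): a fault handler
 * can service a read of never-written anonymous memory by mapping the
 * shared zero page read-only:
 *
 *         pte_t entry = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(addr)),
 *                                             vma->vm_page_prot));
 *
 * The vaddr argument is ignored on x86: every address shares one page.
 */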

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do { } while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}
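
/*
 * A minimal sketch of the intended calling convention: gate these
 * accessors on pte_present(), e.g. when counting referenced pages
 * during a scan (hypothetical loop body):
 *
 *         if (pte_present(pte) && pte_young(pte))
 *                 referenced++;
 */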

static inline int pmd_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
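
/*
 * Illustrative round trip: for a present pte, the pfn and struct page
 * views are interchangeable, since pte_page() is just pfn_to_page()
 * applied to pte_pfn():
 *
 *         unsigned long pfn = pte_pfn(pte);
 *         struct page *page = pte_page(pte);   (same as pfn_to_page(pfn))
 */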

static inline int pmd_large(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}
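
/*
 * The pte_mk*()/pte_clr*() helpers above are pure value
 * transformations and compose freely; a sketch of building a young,
 * dirty, writable entry:
 *
 *         pte = pte_mkwrite(pte_mkdirty(pte_mkyoung(pte)));
 *
 * Nothing touches the page tables until the result is installed with
 * set_pte_at() or a relative.
 */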

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
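
/*
 * Illustrative mprotect-style use: pte_modify() keeps the pfn and the
 * bits in _PAGE_CHG_MASK and takes everything else from the new
 * protection, so changing permissions on an existing mapping is just:
 *
 *         pte = pte_modify(pte, vma->vm_page_prot);
 */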

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         enum page_cache_mode pcm,
                                         enum page_cache_mode new_pcm)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         * - request is write-through, return cannot be write-back
         * - request is write-through, return cannot be write-combine
         */
        if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WC &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WT &&
             new_pcm == _PAGE_CACHE_MODE_WC)) {
                return 0;
        }

        return 1;
}
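
/*
 * Sketch of a caller (illustrative, not a definition from this file):
 * when PAT tracking downgrades a request for [paddr, paddr + size)
 * from pcm to new_pcm, the result must be re-validated; e.g. a WC
 * request that came back WB fails:
 *
 *         if (!is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *                                     _PAGE_CACHE_MODE_WB))
 *                 return -EINVAL;
 */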

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
//#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
        return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
                == _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
        /*
         * Only check the low word on 32-bit platforms, since it might
         * be out of sync with the upper half.
         */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)           \
        pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
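
/*
 * With 4 KiB pages (PAGE_SHIFT == 12) this is npg >> 8, so e.g.
 * pages_to_mb(262144) == 1024, i.e. 1 GiB worth of pages.
 */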

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           \
        pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
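
/*
 * Illustrative top-to-bottom walk (a sketch, with no locking shown):
 * resolving a kernel virtual address to its pte with the helpers
 * defined above, bailing out on missing or large entries:
 *
 *         pgd_t *pgd = pgd_offset_k(addr);
 *         if (pgd_none(*pgd) || pgd_bad(*pgd))
 *                 return NULL;
 *         pud = pud_offset(pgd, addr);
 *         if (pud_none(*pud) || pud_large(*pud))
 *                 return NULL;
 *         pmd = pmd_offset(pud, addr);
 *         if (pmd_none(*pmd) || pmd_large(*pmd))
 *                 return NULL;
 *         pte = pte_offset_kernel(pmd, addr);
 */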


#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pmd_t *pmdp)
{
        return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
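
/*
 * Typical use (illustrative): seeding the kernel half of a freshly
 * allocated pgd from the kernel's reference page tables; swapper_pg_dir
 * is named here purely for illustration:
 *
 *         clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *                         swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                         KERNEL_PGD_PTRS);
 */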

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}
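
/*
 * For the usual x86 geometry (PAGE_SHIFT == 12, PTE_SHIFT == 9) and
 * an enum pg_level numbering where PG_LEVEL_4K == 1, this yields
 * shifts of 12/21/30 and sizes of 4 KiB/2 MiB/1 GiB for the
 * 4K/2M/1G levels respectively.
 */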

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

//#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */