@@ -19,7 +19,14 @@
 #include <asm/x86_init.h>
 
 void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+void ptdump_walk_pgd_level_checkwx(void);
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx()	ptdump_walk_pgd_level_checkwx()
+#else
+#define debug_checkwx()	do { } while (0)
+#endif
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
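
The new debug_checkwx() hook compiles away entirely unless CONFIG_DEBUG_WX=y. A minimal sketch of the intended call site, assuming the check is meant to run once the kernel's final protections are in place (the mark_rodata_ro() body here is illustrative only):

	void mark_rodata_ro(void)
	{
		/* ... .text/.rodata made read-only, data made NX ... */

		/* Walk the kernel page tables and warn on any W+X mapping;
		 * a no-op when CONFIG_DEBUG_WX is not set.
		 */
		debug_checkwx();
	}
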
@@ -115,11 +122,6 @@
 	return pte_flags(pte) & _PAGE_RW;
 }
 
-static inline int pte_file(pte_t pte)
-{
-	return pte_flags(pte) & _PAGE_FILE;
-}
-
 static inline int pte_huge(pte_t pte)
 {
 	return pte_flags(pte) & _PAGE_PSE;
@@ -137,13 +139,7 @@
 
 static inline int pte_special(pte_t pte)
 {
-	/*
-	 * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h.
-	 * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 ==
-	 * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL.
-	 */
-	return (pte_flags(pte) & _PAGE_SPECIAL) &&
-		(pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE));
+	return pte_flags(pte) & _PAGE_SPECIAL;
 }
 
 static inline unsigned long pte_pfn(pte_t pte)
@@ -153,12 +149,12 @@
 
 static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 }
 
 static inline unsigned long pud_pfn(pud_t pud)
 {
-	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
+	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
 }
 
 #define pte_page(pte)	pfn_to_page(pte_pfn(pte))
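
Switching away from the flat PTE_PFN_MASK matters because a huge entry reuses bit 12 (normally a PFN bit) as the large-page PAT bit. A sketch of the companion helper this hunk depends on, assuming it mirrors the pmd_pfn_mask()/pud_pfn_mask() definitions added to pgtable_types.h by the same series:

	static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
	{
		if (native_pmd_val(pmd) & _PAGE_PSE)
			return PHYSICAL_PMD_PAGE_MASK;	/* 2M entry: exclude the PAT bit */
		else
			return PTE_PFN_MASK;		/* 4K entry: full PFN field */
	}

pud_pfn_mask() is the analogous check against PHYSICAL_PUD_PAGE_MASK for 1G entries.
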
@@ -305,7 +301,7 @@
 
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-	return pmd_clear_flags(pmd, _PAGE_PRESENT);
+	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
@@ -329,21 +325,16 @@
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
-static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
 {
 	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
 }
 
-static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 {
-	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
-static inline int pte_file_soft_dirty(pte_t pte)
-{
-	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
-}
-
 #endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 
 /*
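
The renamed and added clear helpers pair with the pte_swp_* variants near the end of this file. A sketch of the kind of caller they serve, assuming it is modelled on the clear_refs soft-dirty path in fs/proc/task_mmu.c:

	static inline void clear_soft_dirty(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *pte)
	{
		pte_t ptent = *pte;

		if (pte_present(ptent)) {
			ptent = pte_wrprotect(ptent);	/* next write must fault */
			ptent = pte_clear_soft_dirty(ptent);
		} else if (is_swap_pte(ptent)) {
			ptent = pte_swp_clear_soft_dirty(ptent);
		}
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
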
@@ -405,7 +396,9 @@
 	return __pgprot(preservebits | addbits);
 }
 
-#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
+#define pte_pgprot(x) __pgprot(pte_flags(x))
+#define pmd_pgprot(x) __pgprot(pmd_flags(x))
+#define pud_pgprot(x) __pgprot(pud_flags(x))
 
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
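pte_flags() already masks with PTE_FLAGS_MASK internally, so the dropped & PTE_FLAGS_MASK was redundant; the new pmd/pud variants allow the same flag round-trips on huge entries. A hedged sketch of the usual pattern (relocate_pte() is a hypothetical helper; pfn_pte() is the real constructor):

	/* Rebuild an entry with identical protections but a new pfn. */
	static inline pte_t relocate_pte(pte_t old, unsigned long new_pfn)
	{
		return pfn_pte(new_pfn, pte_pgprot(old));
	}
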
@@ -424,11 +417,17 @@
 	 * requested memtype:
 	 * - request is uncached, return cannot be write-back
 	 * - request is write-combine, return cannot be write-back
+	 * - request is write-through, return cannot be write-back
+	 * - request is write-through, return cannot be write-combine
 	 */
 	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
 	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 	    (pcm == _PAGE_CACHE_MODE_WC &&
-	     new_pcm == _PAGE_CACHE_MODE_WB)) {
+	     new_pcm == _PAGE_CACHE_MODE_WB) ||
+	    (pcm == _PAGE_CACHE_MODE_WT &&
+	     new_pcm == _PAGE_CACHE_MODE_WB) ||
+	    (pcm == _PAGE_CACHE_MODE_WT &&
+	     new_pcm == _PAGE_CACHE_MODE_WC)) {
 		return 0;
 	}
 
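The two added clauses extend the compatibility matrix for write-through: a WT request must never be satisfied by the weaker-ordered WB or WC types. A hypothetical ioremap-style caller, sketched under the assumption that the reservation step may return a different memtype than requested:

	/* req_pcm: what the driver asked for; new_pcm: what PAT reserved. */
	if (!is_new_memtype_allowed(paddr, size, req_pcm, new_pcm)) {
		free_memtype(paddr, paddr + size);
		return NULL;	/* e.g. WT requested but only WC available */
	}
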
@@ -463,13 +462,6 @@
 
 static inline int pte_present(pte_t a)
 {
-	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
-			       _PAGE_NUMA);
-}
-
-#define pte_present_nonuma pte_present_nonuma
-static inline int pte_present_nonuma(pte_t a)
-{
 	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 }
 
@@ -479,7 +471,7 @@
 	if (pte_flags(a) & _PAGE_PRESENT)
 		return true;
 
-	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+	if ((pte_flags(a) & _PAGE_PROTNONE) &&
 			mm_tlb_flush_pending(mm))
 		return true;
 
@@ -499,10 +491,27 @@
 	 * the _PAGE_PSE flag will remain set at all times while the
 	 * _PAGE_PRESENT bit is clear).
 	 */
-	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
-				 _PAGE_NUMA);
+	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+		== _PAGE_PROTNONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+		== _PAGE_PROTNONE;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 static inline int pmd_none(pmd_t pmd)
 {
 	/* Only check low word on 32-bit platforms, since it might be
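
Note that the new helpers return true for any PROT_NONE-style entry, not just NUMA-hinting ones; disambiguation happens at the call site. A sketch, assuming the fault path distinguishes the two cases by the VMA's protections (is_numa_hint_fault() is a hypothetical name):

	/* A protnone pte under an accessible VMA can only be a NUMA hint,
	 * since a genuine PROT_NONE mapping has an inaccessible VMA.
	 */
	static bool is_numa_hint_fault(struct vm_area_struct *vma, pte_t pte)
	{
		return pte_protnone(pte) &&
		       (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC));
	}
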
@@ -512,7 +521,7 @@
 
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 {
-	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
 }
 
 /*
@@ -519,7 +528,8 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
+#define pmd_page(pmd)	\
+	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
 
 /*
  * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -559,11 +569,6 @@
 
 static inline int pmd_bad(pmd_t pmd)
 {
-#ifdef CONFIG_NUMA_BALANCING
-	/* pmd_numa check */
-	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
-		return 0;
-#endif
 	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
 }
 
@@ -572,7 +577,7 @@
 	return npg >> (20 - PAGE_SHIFT);
 }
 
-#if PAGETABLE_LEVELS > 2
+#if CONFIG_PGTABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
 	return native_pud_val(pud) == 0;
@@ -585,7 +590,7 @@
 
 static inline unsigned long pud_page_vaddr(pud_t pud)
 {
-	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
 }
 
 /*
@@ -592,7 +597,8 @@
  * Currently stuck as a macro due to indirect forward reference to
  * linux/mmzone.h's __section_mem_map_addr() definition:
  */
-#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+#define pud_page(pud)	\
+	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
 
 /* Find an entry in the second-level page table.. */
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -615,9 +621,9 @@
 {
 	return 0;
 }
-#endif	/* PAGETABLE_LEVELS > 2 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
-#if PAGETABLE_LEVELS > 3
+#if CONFIG_PGTABLE_LEVELS > 3
 static inline int pgd_present(pgd_t pgd)
 {
 	return pgd_flags(pgd) & _PAGE_PRESENT;
@@ -654,7 +660,7 @@
 {
 	return !native_pgd_val(pgd);
 }
-#endif	/* PAGETABLE_LEVELS > 3 */
+#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 #endif	/* __ASSEMBLY__ */
 
@@ -820,8 +826,8 @@
 	return pmd_flags(pmd) & _PAGE_RW;
 }
 
-#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
-static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmdp)
 {
 	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
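
The huge in the new name documents that the helper is only valid on a huge pmd. A hedged sketch of the expected calling pattern (the dirty-propagation step is illustrative, not lifted from a specific call site):

	if (pmd_trans_huge(*pmdp)) {
		/* Atomically clear the huge pmd, keeping the old value. */
		pmd_t old = pmdp_huge_get_and_clear(mm, addr, pmdp);

		if (pmd_dirty(old))
			set_page_dirty(pmd_page(old));
	}
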
@@ -882,19 +888,16 @@
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
 
 static inline int pte_swp_soft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
 }
 
 static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
 {
-	VM_BUG_ON(pte_present_nonuma(pte));
 	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
 }
 #endif
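
The VM_BUG_ON(pte_present_nonuma(...)) assertions go away simply because pte_present_nonuma() was deleted earlier in this patch. For context, a sketch of where the swap variants are used, assuming it is modelled on the swap-out path in mm/rmap.c:

	/* Soft-dirty must survive swap-out, so the bit is carried over
	 * onto the swap pte encoding.
	 */
	swp_pte = swp_entry_to_pte(entry);
	if (pte_soft_dirty(pteval))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	set_pte_at(mm, address, pte, swp_pte);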