4,7 → 4,7 |
#include <linux/const.h> |
#include <asm/page_types.h> |
|
/* Lowest virtual address available to user space (0UL so arithmetic stays unsigned long) */
#define FIRST_USER_ADDRESS 0UL
|
#define _PAGE_BIT_PRESENT 0 /* is present */ |
#define _PAGE_BIT_RW 1 /* writeable */ |
27,19 → 27,9 |
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

/*
 * Swap offsets on configurations that allow automatic NUMA balancing use the
 * bits after _PAGE_BIT_GLOBAL. To uniquely distinguish NUMA hinting PTEs from
 * swap entries, we use the first bit after _PAGE_BIT_GLOBAL and shrink the
 * maximum possible swap space from 16TB to 8TB.
 */
#define _PAGE_BIT_NUMA (_PAGE_BIT_GLOBAL+1)

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY

/* Single-bit masks built from the bit numbers above, typed as pteval_t */
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
78,21 → 68,6 |
#endif |
|
/*
 * _PAGE_NUMA distinguishes between a numa hinting minor fault and a page
 * that is not present. The hinting fault gathers numa placement statistics
 * (see pte_numa()). The bit is always zero when the PTE is not present.
 *
 * The bit picked must be always zero when the pmd is present and not
 * present, so that we don't lose information when we set it while
 * atomically clearing the present bit.
 */
#ifdef CONFIG_NUMA_BALANCING
#define _PAGE_NUMA (_AT(pteval_t, 1) << _PAGE_BIT_NUMA)
#else
#define _PAGE_NUMA (_AT(pteval_t, 0)) /* NUMA balancing disabled: no bit reserved */
#endif
|
/* |
* Tracking soft dirty bit when a page goes to a swap is tricky. |
* We need a bit which can be stored in pte _and_ not conflict |
* with swap entry format. On x86 bits 6 and 7 are *not* involved |
114,7 → 89,6 |
#define _PAGE_NX (_AT(pteval_t, 0)) |
#endif |
|
#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) |
#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE) |
|
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ |
125,8 → 99,8 |
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
			_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
			_PAGE_SOFT_DIRTY)
/* Same, for huge-page entries: additionally preserve the PSE (huge) bit */
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
|
/* |
* The cache modes defined here are used to translate between pure SW usage |
235,10 → 209,10 |
|
#include <linux/types.h> |
|
/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)

/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
|
/* Wrapper struct so page-protection values get their own distinct type */
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
260,7 → 234,7 |
return native_pgd_val(pgd) & PTE_FLAGS_MASK; |
} |
|
/* pud_t only exists as a real level when the page-table has more than 3 levels */
#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t; |
|
static inline pud_t native_make_pud(pmdval_t val) |
281,7 → 255,7 |
} |
#endif |
|
/* pmd_t only exists as a real level when the page-table has more than 2 levels */
#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t; |
|
static inline pmd_t native_make_pmd(pmdval_t val) |
302,14 → 276,40 |
} |
#endif |
|
/*
 * Return the mask selecting the PFN field of a PUD entry.  A huge
 * (_PAGE_PSE) entry maps a whole PUD-sized region, so its PFN field is
 * aligned more coarsely than that of an entry pointing at a page table.
 */
static inline pudval_t pud_pfn_mask(pud_t pud)
{
	return (native_pud_val(pud) & _PAGE_PSE) ?
		PHYSICAL_PUD_PAGE_MASK : PTE_PFN_MASK;
}
|
/* Everything outside the PFN field of a PUD entry is flag bits. */
static inline pudval_t pud_flags_mask(pud_t pud)
{
	pudval_t pfn_mask = pud_pfn_mask(pud);

	return ~pfn_mask;
}
|
/*
 * Extract the flag bits of a PUD entry.  Uses pud_flags_mask() rather
 * than the fixed PTE_FLAGS_MASK so that huge (_PAGE_PSE) entries do not
 * leak low PFN bits into the flags.
 */
static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}
|
/*
 * Return the mask selecting the PFN field of a PMD entry.  A huge
 * (_PAGE_PSE) entry maps a whole PMD-sized region, so its PFN field is
 * aligned more coarsely than that of an entry pointing at a page table.
 */
static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	return (native_pmd_val(pmd) & _PAGE_PSE) ?
		PHYSICAL_PMD_PAGE_MASK : PTE_PFN_MASK;
}
|
/* Everything outside the PFN field of a PMD entry is flag bits. */
static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	pmdval_t pfn_mask = pmd_pfn_mask(pmd);

	return ~pfn_mask;
}
|
/*
 * Extract the flag bits of a PMD entry.  Uses pmd_flags_mask() rather
 * than the fixed PTE_FLAGS_MASK so that huge (_PAGE_PSE) entries do not
 * leak low PFN bits into the flags.
 */
static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
|
static inline pte_t native_make_pte(pteval_t val) |
327,20 → 327,6 |
return native_pte_val(pte) & PTE_FLAGS_MASK; |
} |
|
#ifdef CONFIG_NUMA_BALANCING |
/* Set of bits that distinguishes present, prot_none and numa ptes */ |
#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) |
static inline pteval_t ptenuma_flags(pte_t pte) |
{ |
return pte_flags(pte) & _PAGE_NUMA_MASK; |
} |
|
static inline pmdval_t pmdnuma_flags(pmd_t pmd) |
{ |
return pmd_flags(pmd) & _PAGE_NUMA_MASK; |
} |
#endif /* CONFIG_NUMA_BALANCING */ |
|
#define pgprot_val(x) ((x).pgprot) |
#define __pgprot(x) ((pgprot_t) { (x) } ) |
|
407,6 → 393,9 |
/* x86 supplies its own write-combining pgprot conversion */
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/* x86 supplies its own write-through pgprot conversion */
#define pgprot_writethrough pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
|