powerpc/mm: Make pte_pgprot return all pte bits
Other architectures do the same. Instead of adding back the required pte
bits (which were masked out) in __ioremap_at(), make sure we filter only
the pfn bits out.

Fixes: 26973fa5ac ("powerpc/mm: use pte helpers in generic code")
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit b9fb4480a3
parent 4ffe713b75
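For context, here is a minimal standalone sketch of the behavioural change the patch describes. It is not kernel code: the macro names mirror the powerpc ones, but the bit values are invented for illustration. The old pte_pgprot() masked with a hand-maintained PAGE_PROT_BITS allow-list, so bits not on the list (e.g. _PAGE_PRESENT) were silently dropped and callers such as __ioremap_at() had to add them back; the new helper keeps every bit except the pfn.

#include <stdio.h>

/* Hypothetical bit layout, for illustration only (not the real powerpc PTE). */
#define PTE_RPN_MASK   0xfffff000UL   /* pfn lives in the high bits */
#define _PAGE_PRESENT  0x001UL
#define _PAGE_USER     0x002UL
#define _PAGE_RW       0x004UL
#define _PAGE_DIRTY    0x008UL
#define _PAGE_ACCESSED 0x010UL
#define _PAGE_NO_CACHE 0x020UL

/* Old behaviour: only an explicit allow-list of bits survived. */
#define PAGE_PROT_BITS (_PAGE_NO_CACHE | _PAGE_USER | _PAGE_ACCESSED | \
			_PAGE_RW | _PAGE_DIRTY)

static unsigned long pte_pgprot_old(unsigned long pte)
{
	return pte & PAGE_PROT_BITS;	/* drops _PAGE_PRESENT and friends */
}

static unsigned long pte_pgprot_new(unsigned long pte)
{
	return pte & ~PTE_RPN_MASK;	/* drops only the pfn bits */
}

int main(void)
{
	unsigned long pte = 0x12345000UL | _PAGE_PRESENT | _PAGE_RW |
			    _PAGE_DIRTY | _PAGE_ACCESSED;

	printf("old: %#lx (PRESENT lost)\n", pte_pgprot_old(pte));
	printf("new: %#lx (PRESENT kept, pfn stripped)\n", pte_pgprot_new(pte));
	return 0;
}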
@@ -48,11 +48,6 @@ static inline bool pte_user(pte_t pte)
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_DIRTY)
-
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
@@ -396,7 +391,6 @@ static inline int pte_young(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline bool pte_exec(pte_t pte)		{ return true; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 static inline int pte_present(pte_t pte)
 {
@@ -128,13 +128,6 @@
 
 #define H_PTE_PKEY  (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
 		     H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
-/*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS  (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
-			 H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
-			 _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
-			 _PAGE_SOFT_DIRTY | H_PTE_PKEY)
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
@@ -496,7 +489,6 @@ static inline bool pte_exec(pte_t pte)
 	return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
 }
 
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 #ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 static inline bool pte_soft_dirty(pte_t pte)
@@ -73,11 +73,6 @@
 /* Until my rework is finished, 40x still needs atomic PTE updates */
 #define PTE_ATOMIC_UPDATES	1
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
-
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
 #define _PAGE_BASE	(_PAGE_BASE_NC)
 
@@ -93,11 +93,6 @@
 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
 #define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC)
-
 /* TODO: Add large page lowmem mapping support */
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -55,11 +55,6 @@
 #define _PAGE_KERNEL_RW		(_PAGE_SH | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RWX	(_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
-			 _PAGE_ACCESSED | _PAGE_RO | _PAGE_NA | \
-			 _PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
-
 #define _PMD_PRESENT	0x0001
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
 #define _PMD_BAD	0x0fd0
@@ -39,11 +39,6 @@
 /* No page size encoding in the linux PTE */
 #define _PAGE_PSIZE		0
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC)
-
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
@@ -52,7 +52,6 @@ static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
 static inline bool pte_hashpte(pte_t pte)	{ return false; }
 static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
 static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
-static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 #ifdef CONFIG_NUMA_BALANCING
 /*
@@ -82,11 +82,6 @@
 #define _PTE_NONE_MASK	0
 #endif
 
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
-			 _PAGE_WRITETHRU | _PAGE_USER | _PAGE_ACCESSED | \
-			 _PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY | _PAGE_EXEC)
-
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
@@ -46,6 +46,16 @@ struct mm_struct;
 /* Keep these as a macros to avoid include dependency mess */
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+	unsigned long pte_flags;
+
+	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
+	return __pgprot(pte_flags);
+}
 
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
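A side effect of the generic helper added above is that pte_pfn() and pte_pgprot() now partition the pte cleanly, so rebuilding an entry from its two halves loses nothing. A toy standalone model of that round-trip follows; the layout and helper names are invented for illustration and are not the real powerpc PTE format.

#include <assert.h>
#include <stdio.h>

/* Toy layout for illustration: pfn in bits 12 and up, flags in bits 0..11. */
#define PTE_RPN_MASK	(~0xfffUL)

static unsigned long toy_pte_pfn(unsigned long pte)	{ return pte >> 12; }
static unsigned long toy_pte_pgprot(unsigned long pte)	{ return pte & ~PTE_RPN_MASK; }
static unsigned long toy_pfn_pte(unsigned long pfn, unsigned long prot)
{
	return (pfn << 12) | prot;
}

int main(void)
{
	unsigned long pte = (0x12345UL << 12) | 0x0d3UL;	/* pfn plus some flag bits */
	unsigned long rebuilt = toy_pfn_pte(toy_pte_pfn(pte), toy_pte_pgprot(pte));

	assert(rebuilt == pte);	/* holds because no flag bit is masked away */
	printf("round-trip ok: %#lx\n", rebuilt);
	return 0;
}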