diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index a9c6ecef3656..67ceffc01b43 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -146,9 +146,29 @@ extern int icache_44x_need_flush;
 
 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE
 
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_SPECIAL)
+/* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT
+ * here (ie, naturally aligned). Platforms that don't just pre-define the
+ * value so we don't override it here
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+#endif
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MAX (1ULL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#else
+#define PTE_RPN_MAX (1UL << (32 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#endif
+
+/* _PAGE_CHG_MASK masks the bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
@@ ... @@
-#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_pfn(x) (pte_val(x) >> PTE_RPN_SHIFT)
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |\
 				pgprot_val(prot))
 #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
 #endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pte-fsl-booke.h b/arch/powerpc/include/asm/pte-fsl-booke.h
index 0fe5de7bea3d..10820f58acf5 100644
--- a/arch/powerpc/include/asm/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/pte-fsl-booke.h
@@ -36,6 +36,8 @@
 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK 0xffffffffffff0000ULL
+/* We extend the size of the PTE flags area when using 64-bit PTEs */
+#define PTE_RPN_SHIFT (PAGE_SHIFT + 8)
 #endif
 
 #define _PMD_PRESENT 0
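
For readers following the arithmetic, the sketch below is a standalone userspace illustration, not kernel code: pte_basic_t, PAGE_SHIFT, and the MY_PAGE_* flag values are stand-ins chosen for this example. It shows how a PFN round-trips through pfn_pte()/pte_pfn() when PTE_RPN_SHIFT is widened to PAGE_SHIFT + 8, as the pte-fsl-booke.h hunk above does for 64-bit PTEs.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* Stand-ins for the kernel types and constants; values are illustrative only. */
typedef uint64_t pte_basic_t;            /* 64-bit PTE, as with CONFIG_PTE_64BIT */
#define PAGE_SHIFT      12               /* 4KB pages */
/* PFN starts 8 bits above PAGE_SHIFT: 20 low bits for flags instead of 12. */
#define PTE_RPN_SHIFT   (PAGE_SHIFT + 8)
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))

/* Two example flag bits living below PTE_RPN_SHIFT (illustrative values). */
#define MY_PAGE_PRESENT 0x00001ULL
#define MY_PAGE_DIRTY   0x00002ULL

/* Mirrors pfn_pte(): place the PFN above the flags area and OR in protections. */
static pte_basic_t my_pfn_pte(uint64_t pfn, uint64_t prot)
{
	return ((pte_basic_t)pfn << PTE_RPN_SHIFT) | prot;
}

/* Mirrors pte_pfn(): recover the PFN by shifting the flag bits away. */
static uint64_t my_pte_pfn(pte_basic_t pte)
{
	return pte >> PTE_RPN_SHIFT;
}

int main(void)
{
	/* PFN of a 36-bit physical address, i.e. one that needs more than 32 bits. */
	uint64_t pfn = 0xF12345678ULL >> PAGE_SHIFT;
	pte_basic_t pte = my_pfn_pte(pfn, MY_PAGE_PRESENT | MY_PAGE_DIRTY);

	assert(my_pte_pfn(pte) == pfn);              /* round trip is lossless */
	assert((pte & ~PTE_RPN_MASK) ==
	       (MY_PAGE_PRESENT | MY_PAGE_DIRTY));   /* flags stay below the PFN */

	printf("pte = 0x%016" PRIx64 "  pfn = 0x%" PRIx64 "\n",
	       (uint64_t)pte, my_pte_pfn(pte));
	return 0;
}

With 64-bit PTEs there is still ample room above bit 20 for the PFN of a 36-bit physical address, which is why the FSL BookE header can afford to raise PTE_RPN_SHIFT and keep the enlarged flags area below the PFN.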