x86/mm/pkeys: Add arch-specific VMA protection bits
Lots of things seem to do:

        vma->vm_page_prot = vm_get_page_prot(flags);

and the ptes get created right from things we pull out of ->vm_page_prot. So it is very convenient if we can store the protection key in flags and vm_page_prot, just like the existing permission bits (_PAGE_RW/PRESENT). It greatly reduces the amount of plumbing and arch-specific hacking we have to do in generic code.

This also takes the new PROT_PKEY{0,1,2,3} flags and turns *those* into VM_ flags for vma->vm_flags.

The protection key values are stored in 4 places:

        1. "prot" argument to system calls
        2. vma->vm_flags, filled from the mmap "prot"
        3. vma->vm_page_prot, filled from vma->vm_flags
        4. the PTE itself.

The pseudocode for these four steps is as follows:

        mmap(PROT_PKEY*)
        vma->vm_flags = ... | arch_calc_vm_prot_bits(mmap_prot);
        vma->vm_page_prot = ... | arch_vm_get_page_prot(vma->vm_flags);
        pte = pfn | vma->vm_page_prot

Note that this provides a new definition for x86:

        arch_vm_get_page_prot()

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20160212210210.FE483A42@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
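To make the four-step flow concrete, here is a small stand-alone user-space model of it. It is only a sketch: calc_vm_prot_bits() and page_prot_from_vm_flags() are hypothetical stand-ins for the kernel helpers, and the bit positions (key bits at bit 32 of vm_flags, at bit 59 of the pte) are assumptions based on the x86 layout this series establishes.

        #include <assert.h>
        #include <stdint.h>

        #define VM_PKEY_SHIFT   32      /* assumed: VM_HIGH_ARCH_BIT_0 */
        #define PTE_PKEY_SHIFT  59      /* assumed: _PAGE_BIT_PKEY_BIT0 */
        #define PAGE_SHIFT      12

        /* Step 1 -> 2: pack the 4-bit key from the mmap "prot" into vm_flags. */
        static uint64_t calc_vm_prot_bits(unsigned int pkey)
        {
                return (uint64_t)(pkey & 0xf) << VM_PKEY_SHIFT;
        }

        /* Step 2 -> 3: turn the vm_flags key bits into pte protection bits. */
        static uint64_t page_prot_from_vm_flags(uint64_t vm_flags)
        {
                return ((vm_flags >> VM_PKEY_SHIFT) & 0xf) << PTE_PKEY_SHIFT;
        }

        int main(void)
        {
                unsigned int pkey = 5;          /* step 1: key from PROT_PKEY* */
                uint64_t vm_flags = calc_vm_prot_bits(pkey);
                uint64_t page_prot = page_prot_from_vm_flags(vm_flags);
                uint64_t pte = (0x1234ULL << PAGE_SHIFT) | page_prot; /* step 4 */

                /* the key survives the whole pipeline */
                assert(((pte >> PTE_PKEY_SHIFT) & 0xf) == pkey);
                return 0;
        }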
commit 8f62c88322
parent 63c17fb8e5

4 changed files with 44 additions and 2 deletions
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -275,4 +275,15 @@ static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
 	mpx_notify_unmap(mm, vma, start, end);
 }
 
+static inline int vma_pkey(struct vm_area_struct *vma)
+{
+	u16 pkey = 0;
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+	unsigned long vma_pkey_mask = VM_PKEY_BIT0 | VM_PKEY_BIT1 |
+				      VM_PKEY_BIT2 | VM_PKEY_BIT3;
+	pkey = (vma->vm_flags & vma_pkey_mask) >> VM_PKEY_SHIFT;
+#endif
+	return pkey;
+}
+
 #endif /* _ASM_X86_MMU_CONTEXT_H */
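The new vma_pkey() is just the inverse of the packing: mask out the four key bits and shift them down. A quick stand-alone check of the arithmetic, assuming VM_PKEY_SHIFT is 32:

        #include <assert.h>
        #include <stdint.h>

        #define VM_PKEY_SHIFT   32                          /* assumed */
        #define VM_PKEY_MASK    (0xfULL << VM_PKEY_SHIFT)   /* BIT0..BIT3 */

        static unsigned int vma_pkey_model(uint64_t vm_flags)
        {
                return (vm_flags & VM_PKEY_MASK) >> VM_PKEY_SHIFT;
        }

        int main(void)
        {
                /* vm_flags carrying pkey 11 (0b1011) plus unrelated low bits */
                uint64_t vm_flags = (0xbULL << VM_PKEY_SHIFT) | 0x7;
                assert(vma_pkey_model(vm_flags) == 11);
                return 0;
        }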
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -115,7 +115,12 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
 			 _PAGE_DIRTY)
 
-/* Set of bits not changed in pte_modify */
+/*
+ * Set of bits not changed in pte_modify.  The pte's
+ * protection key is treated like _PAGE_RW, for
+ * instance, and is *not* included in this mask since
+ * pte_modify() does modify it.
+ */
 #define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
 			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |	\
 			 _PAGE_SOFT_DIRTY)
@@ -231,7 +236,10 @@ enum page_cache_mode {
 /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
 #define PTE_PFN_MASK		((pteval_t)PHYSICAL_PAGE_MASK)
 
-/* Extracts the flags from a (pte|pmd|pud|pgd)val_t of a 4KB page */
+/*
+ * Extracts the flags from a (pte|pmd|pud|pgd)val_t
+ * This includes the protection key value.
+ */
 #define PTE_FLAGS_MASK		(~PTE_PFN_MASK)
 
 typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
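The point of the new comment is that a protection key behaves like _PAGE_RW under pte_modify(): because the key bits are left out of _PAGE_CHG_MASK, a new protection replaces them instead of preserving them. A simplified model of that semantic, with made-up narrow masks standing in for the real x86 ones:

        #include <assert.h>
        #include <stdint.h>

        /* Toy layout: low 8 bits "pfn", bit 8 accessed, bit 9 dirty, bits 12-15 key. */
        #define PFN_MASK        0x0ffULL
        #define ACCESSED        (1ULL << 8)
        #define DIRTY           (1ULL << 9)
        #define PKEY_SHIFT      12
        #define CHG_MASK        (PFN_MASK | ACCESSED | DIRTY)   /* no key bits */

        /* Keep the CHG_MASK bits of the old pte; take everything else from newprot. */
        static uint64_t pte_modify_model(uint64_t pte, uint64_t newprot)
        {
                return (pte & CHG_MASK) | (newprot & ~CHG_MASK);
        }

        int main(void)
        {
                uint64_t pte = 0x42 | ACCESSED | DIRTY | (5ULL << PKEY_SHIFT);
                uint64_t newprot = 9ULL << PKEY_SHIFT;          /* new key: 9 */
                uint64_t out = pte_modify_model(pte, newprot);

                assert((out & PFN_MASK) == 0x42);         /* pfn preserved */
                assert((out & ACCESSED) && (out & DIRTY));
                assert(((out >> PKEY_SHIFT) & 0xf) == 9); /* key replaced, not kept */
                return 0;
        }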
diff --git a/arch/x86/include/uapi/asm/mman.h b/arch/x86/include/uapi/asm/mman.h
--- a/arch/x86/include/uapi/asm/mman.h
+++ b/arch/x86/include/uapi/asm/mman.h
@@ -6,6 +6,22 @@
 #define MAP_HUGE_2MB    (21 << MAP_HUGE_SHIFT)
 #define MAP_HUGE_1GB    (30 << MAP_HUGE_SHIFT)
 
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+/*
+ * Take the 4 protection key bits out of the vma->vm_flags
+ * value and turn them in to the bits that we can put in
+ * to a pte.
+ *
+ * Only override these if Protection Keys are available
+ * (which is only on 64-bit).
+ */
+#define arch_vm_get_page_prot(vm_flags)	__pgprot(	\
+		((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |	\
+		((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))
+#endif
+
 #include <asm-generic/mman.h>
 
 #endif /* _ASM_X86_MMAN_H */
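Because the four key bits are contiguous in both encodings (assuming vm_flags bits 32-35 and pte bits 59-62, per the rest of the series), the macro's four conditional ORs are equivalent to shifting the whole field. A sketch that checks that equivalence for every key value:

        #include <assert.h>
        #include <stdint.h>

        /* Assumed bit positions, mirroring VM_PKEY_BIT* and _PAGE_PKEY_BIT*. */
        #define VM_PKEY_BIT(n)      (1ULL << (32 + (n)))
        #define _PAGE_PKEY_BIT(n)   (1ULL << (59 + (n)))

        /* The same per-bit selection arch_vm_get_page_prot() performs. */
        static uint64_t page_prot_bits(uint64_t vm_flags)
        {
                return ((vm_flags & VM_PKEY_BIT(0)) ? _PAGE_PKEY_BIT(0) : 0) |
                       ((vm_flags & VM_PKEY_BIT(1)) ? _PAGE_PKEY_BIT(1) : 0) |
                       ((vm_flags & VM_PKEY_BIT(2)) ? _PAGE_PKEY_BIT(2) : 0) |
                       ((vm_flags & VM_PKEY_BIT(3)) ? _PAGE_PKEY_BIT(3) : 0);
        }

        int main(void)
        {
                for (unsigned int pkey = 0; pkey < 16; pkey++) {
                        uint64_t vm_flags = (uint64_t)pkey << 32;
                        /* per-bit translation == shifting the contiguous field */
                        assert(page_prot_bits(vm_flags) == ((uint64_t)pkey << 59));
                }
                return 0;
        }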
diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -183,6 +183,13 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86)
 # define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
+#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
+# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
+# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
+# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
+# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
+# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
+#endif
 #elif defined(CONFIG_PPC)
 # define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
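With VM_PKEY_SHIFT defined as VM_HIGH_ARCH_BIT_0, bit 0 of a key lands exactly on VM_HIGH_ARCH_0, so packing and unpacking the key are plain shifts. A round-trip check, assuming VM_HIGH_ARCH_BIT_0 is 32:

        #include <assert.h>
        #include <stdint.h>

        #define VM_HIGH_ARCH_BIT_0      32              /* assumed */
        #define VM_PKEY_SHIFT           VM_HIGH_ARCH_BIT_0
        #define VM_PKEY_BIT0            (1ULL << (VM_PKEY_SHIFT + 0))
        #define VM_PKEY_BIT3            (1ULL << (VM_PKEY_SHIFT + 3))

        int main(void)
        {
                /* packing key 9 (0b1001) sets VM_PKEY_BIT0 and VM_PKEY_BIT3 ... */
                uint64_t vm_flags = 9ULL << VM_PKEY_SHIFT;
                assert((vm_flags & VM_PKEY_BIT0) && (vm_flags & VM_PKEY_BIT3));

                /* ... and the inverse shift recovers the key, as vma_pkey() does */
                assert(((vm_flags >> VM_PKEY_SHIFT) & 0xf) == 9);
                return 0;
        }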