mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
commit c1821c2e97
This provides a noexec protection on s390 hardware. Our hardware does not have any bits left in the pte for a hw noexec bit, so this is a different approach using shadow page tables and a special addressing mode that allows separate address spaces for code and data.

As a special feature of our "secondary-space" addressing mode, separate page tables can be specified for the translation of data addresses (storage operands) and instruction addresses. The shadow page table is used for the instruction addresses and the standard page table for the data addresses. The shadow page table is linked to the standard page table by a pointer in page->lru.next of the struct page corresponding to the page that contains the standard page table (since page->private is not really private with the pte_lock and the page table pages are not in the LRU list).

Depending on the software bits of a pte, it is either inserted into both page tables or just into the standard (data) page table. Pages of a vma that does not have the VM_EXEC bit set get mapped only in the data address space. Any attempt to execute code on such a page will cause a page translation exception. The standard reaction to this is a SIGSEGV with two exceptions: the two system call opcodes 0x0a77 (sys_sigreturn) and 0x0aad (sys_rt_sigreturn) are allowed. They are stored by the kernel to the signal stack frame. Unfortunately, the signal return mechanism cannot be modified to use an SA_RESTORER because the exception unwinding code depends on the system call opcode stored behind the signal stack frame.

This feature requires that user space is executed in secondary-space mode and the kernel in home-space mode, which means that the addressing modes need to be switched and that the noexec protection only works for user space. After switching the addressing modes, we cannot use the mvcp/mvcs instructions anymore to copy between kernel and user space. A new mvcos instruction has been added to the z9 EC/BC hardware which allows copying between arbitrary address spaces, but on older hardware the page tables need to be walked manually.

Signed-off-by: Gerald Schaefer <geraldsc@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
239 lines · 5.7 KiB · C
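The shadow table is reached through page->lru.next of the page that holds the standard table, as described above. The header below only uses the lookup helpers (get_shadow_pgd, get_shadow_pmd, get_shadow_pte, get_shadow_page); they are defined elsewhere in the s390 headers. As a rough illustration of the idea, and not the actual kernel code, a minimal sketch of such a helper could look like this (it assumes the lru.next linkage set up in pgd_alloc() below):

/*
 * Illustrative sketch only -- not the real s390 helper.  It assumes the
 * linkage established in pgd_alloc() below: the struct page of the
 * standard table carries a pointer to the shadow table in lru.next, so
 * the shadow entry lives at the same offset within the shadow pages.
 * A real implementation would also have to handle tables that span
 * several pages (PGD_ALLOC_ORDER > 0), where the entry may not sit in
 * the page whose lru.next holds the link.
 */
static inline pgd_t *get_shadow_pgd(pgd_t *pgdp)
{
        struct page *page = virt_to_page(pgdp);
        unsigned long offset = (unsigned long) pgdp & ~PAGE_MASK;

        if (!s390_noexec)       /* no shadow tables were allocated */
                return NULL;
        return (pgd_t *) ((unsigned long) page->lru.next + offset);
}

pgd_alloc(), pmd_alloc_one() and pte_alloc_one_kernel() below establish exactly this linkage when s390_noexec is set; the populate and free routines then use the helpers to mirror entries into, or release, the shadow tables.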
/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()       do {} while (0)

extern void diag10(unsigned long addr);

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        0
# define PGD_ALLOC_ORDER        1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER        0
# define PMD_ALLOC_ORDER        2
# define PGD_ALLOC_ORDER        2
#endif /* __s390x__ */

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
        int i;

        if (!pgd)
                return NULL;
        if (s390_noexec) {
                pgd_t *shadow_pgd = (pgd_t *)
                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
                struct page *page = virt_to_page(pgd);

                if (!shadow_pgd) {
                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pgd;
        }
        for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
                pgd_clear(pgd + i);
#endif
        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);

        if (shadow_pgd)
                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)               ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                             do { } while (0)
#define __pmd_free_tlb(tlb,x)                   do { } while (0)
#define pgd_populate(mm, pmd, pte)              BUG()
#define pgd_populate_kernel(mm, pmd, pte)       BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
        int i;

        if (!pmd)
                return NULL;
        if (s390_noexec) {
                pmd_t *shadow_pmd = (pmd_t *)
                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
                struct page *page = virt_to_page(pmd);

                if (!shadow_pmd) {
                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pmd;
        }
        for (i = 0; i < PTRS_PER_PMD; i++)
                pmd_clear(pmd + i);
        return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pmd)
                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

#define __pmd_free_tlb(tlb,pmd)                 \
        do {                                    \
                tlb_flush_mmu(tlb, 0, 0);       \
                pmd_free(pmd);                  \
        } while (0)

static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pgd && shadow_pmd)
                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
        pgd_populate_kernel(mm, pgd, pmd);
}

#endif /* __s390x__ */

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
        pte_t *pte = (pte_t *)page_to_phys(page);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
        pte_t *shadow_pte = get_shadow_pte(pte);

        pmd_populate_kernel(mm, pmd, pte);
        if (shadow_pmd && shadow_pte)
                pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
        int i;

        if (!pte)
                return NULL;
        if (s390_noexec) {
                pte_t *shadow_pte = (pte_t *)
                        __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                struct page *page = virt_to_page(pte);

                if (!shadow_pte) {
                        free_page((unsigned long) pte);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pte;
        }
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_clear(mm, vmaddr, pte + i);
                vmaddr += PAGE_SIZE;
        }
        return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
        pte_t *shadow_pte = get_shadow_pte(pte);

        if (shadow_pte)
                free_page((unsigned long) shadow_pte);
        free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
        struct page *shadow_page = get_shadow_page(pte);

        if (shadow_page)
                __free_page(shadow_page);
        __free_page(pte);
}

#define __pte_free_tlb(tlb, pte)                                \
({                                                              \
        struct mmu_gather *__tlb = (tlb);                       \
        struct page *__pte = (pte);                             \
        struct page *shadow_page = get_shadow_page(__pte);      \
        if (shadow_page)                                        \
                tlb_remove_page(__tlb, shadow_page);            \
        tlb_remove_page(__tlb, __pte);                          \
})

#endif /* _S390_PGALLOC_H */