mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-29 23:53:32 +00:00
b507808ebc
Patch series "mm: In-kernel support for memory-deny-write-execute (MDWE)", v2. The background to this is that systemd has a configuration option called MemoryDenyWriteExecute [2], implemented as a SECCOMP BPF filter. Its aim is to prevent a user task from inadvertently creating an executable mapping that is (or was) writeable. Since such BPF filter is stateless, it cannot detect mappings that were previously writeable but subsequently changed to read-only. Therefore the filter simply rejects any mprotect(PROT_EXEC). The side-effect is that on arm64 with BTI support (Branch Target Identification), the dynamic loader cannot change an ELF section from PROT_EXEC to PROT_EXEC|PROT_BTI using mprotect(). For libraries, it can resort to unmapping and re-mapping but for the main executable it does not have a file descriptor. The original bug report in the Red Hat bugzilla - [3] - and subsequent glibc workaround for libraries - [4]. This series adds in-kernel support for this feature as a prctl PR_SET_MDWE, that is inherited on fork(). The prctl denies PROT_WRITE | PROT_EXEC mappings. Like the systemd BPF filter it also denies adding PROT_EXEC to mappings. However unlike the BPF filter it only denies it if the mapping didn't previous have PROT_EXEC. This allows to PROT_EXEC -> PROT_EXEC | PROT_BTI with mprotect(), which is a problem with the BPF filter. This patch (of 2): The aim of such policy is to prevent a user task from creating an executable mapping that is also writeable. An example of mmap() returning -EACCESS if the policy is enabled: mmap(0, size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, 0, 0); Similarly, mprotect() would return -EACCESS below: addr = mmap(0, size, PROT_READ | PROT_EXEC, flags, 0, 0); mprotect(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC); The BPF filter that systemd MDWE uses is stateless, and disallows mprotect() with PROT_EXEC completely. 
This new prctl allows PROT_EXEC to be enabled if it was already PROT_EXEC, which allows the following case: addr = mmap(0, size, PROT_READ | PROT_EXEC, flags, 0, 0); mprotect(addr, size, PROT_READ | PROT_EXEC | PROT_BTI); where PROT_BTI enables branch tracking identification on arm64. Link: https://lkml.kernel.org/r/20230119160344.54358-1-joey.gouly@arm.com Link: https://lkml.kernel.org/r/20230119160344.54358-2-joey.gouly@arm.com Signed-off-by: Joey Gouly <joey.gouly@arm.com> Co-developed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Jeremy Linton <jeremy.linton@arm.com> Cc: Kees Cook <keescook@chromium.org> Cc: Lennart Poettering <lennart@poettering.net> Cc: Mark Brown <broonie@kernel.org> Cc: nd <nd@arm.com> Cc: Shuah Khan <shuah@kernel.org> Cc: Szabolcs Nagy <szabolcs.nagy@arm.com> Cc: Topi Miettinen <toiwoton@gmail.com> Cc: Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl> Cc: David Hildenbrand <david@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
193 lines
4.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_MMAN_H
|
|
#define _LINUX_MMAN_H
|
|
|
|
#include <linux/mm.h>
|
|
#include <linux/percpu_counter.h>
|
|
|
|
#include <linux/atomic.h>
|
|
#include <uapi/linux/mman.h>
|
|
|
|
/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 *
 * Defining an absent flag to 0 makes it a no-op when OR-ed into a flag
 * mask (e.g. LEGACY_MAP_MASK below) and when tested by generic code, so
 * arch-specific flags need no #ifdefs at their use sites.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
|
|
|
|
/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 *
 * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
 * kernel.
 *
 * Note: the arch-optional entries (MAP_32BIT, MAP_HUGE_*) fold to 0 on
 * architectures that do not define them, thanks to the fallbacks above.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
|
|
|
|
extern int sysctl_overcommit_memory;
|
|
extern int sysctl_overcommit_ratio;
|
|
extern unsigned long sysctl_overcommit_kbytes;
|
|
extern struct percpu_counter vm_committed_as;
|
|
|
|
#ifdef CONFIG_SMP
/* Per-CPU batch size used when updating vm_committed_as. */
extern s32 vm_committed_as_batch;
/* Recompute the batch size for the given overcommit policy. */
extern void mm_compute_batch(int overcommit_policy);
#else
/* UP build: no per-CPU batching, every update is applied directly. */
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif
|
|
|
|
unsigned long vm_memory_committed(void);
|
|
|
|
static inline void vm_acct_memory(long pages)
|
|
{
|
|
percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
|
|
}
|
|
|
|
/*
 * Release a previous vm_acct_memory() charge of @pages.
 */
static inline void vm_unacct_memory(long pages)
{
	/* Un-accounting is simply a negative charge. */
	long delta = -pages;

	vm_acct_memory(delta);
}
|
|
|
|
/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 *
 * The generic fallbacks contribute no extra VM_* bits (expand to 0).
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif
|
|
|
|
#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid, i.e. only bits from the
 * generic set below are present.  @addr is unused by the generic
 * implementation; arch overrides may consult it.
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	const unsigned long known = PROT_READ | PROT_WRITE | PROT_EXEC |
				    PROT_SEM;

	return !(prot & ~known);
}
#define arch_validate_prot arch_validate_prot
#endif
|
|
|
|
#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.  The generic fallback accepts
 * everything; architectures override this to reject combinations they do
 * not support.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif
|
|
|
|
/*
 * Optimisation macro. It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 *
 * Because both bits are compile-time-constant powers of two, the ratio
 * (bit2/bit1) or (bit1/bit2) is an exact power of two, so the branch is
 * replaced by a mask plus a shift-strength multiply/divide.  If either
 * bit is 0 (flag not defined on this arch) the whole expression folds
 * to 0 at compile time.
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	: ((x) & (bit1)) / ((bit1) / (bit2))))
|
|
|
|
/*
|
|
* Combine the mmap "prot" argument into "vm_flags" used internally.
|
|
*/
|
|
static inline unsigned long
|
|
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
|
|
{
|
|
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
|
|
_calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
|
|
_calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
|
|
arch_calc_vm_prot_bits(prot, pkey);
|
|
}
|
|
|
|
/*
|
|
* Combine the mmap "flags" argument into "vm_flags" used internally.
|
|
*/
|
|
static inline unsigned long
|
|
calc_vm_flag_bits(unsigned long flags)
|
|
{
|
|
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
|
|
_calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) |
|
|
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) |
|
|
arch_calc_vm_flag_bits(flags);
|
|
}
|
|
|
|
unsigned long vm_commit_limit(void);
|
|
|
|
/*
|
|
* Denies creating a writable executable mapping or gaining executable permissions.
|
|
*
|
|
* This denies the following:
|
|
*
|
|
* a) mmap(PROT_WRITE | PROT_EXEC)
|
|
*
|
|
* b) mmap(PROT_WRITE)
|
|
* mprotect(PROT_EXEC)
|
|
*
|
|
* c) mmap(PROT_WRITE)
|
|
* mprotect(PROT_READ)
|
|
* mprotect(PROT_EXEC)
|
|
*
|
|
* But allows the following:
|
|
*
|
|
* d) mmap(PROT_READ | PROT_EXEC)
|
|
* mmap(PROT_READ | PROT_EXEC | PROT_BTI)
|
|
*/
|
|
static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
|
|
{
|
|
if (!test_bit(MMF_HAS_MDWE, ¤t->mm->flags))
|
|
return false;
|
|
|
|
if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
|
|
return true;
|
|
|
|
if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
|
|
return true;
|
|
|
|
return false;
|
|
}
|
|
|
|
#endif /* _LINUX_MMAN_H */
|