Mirror of https://github.com/jart/cosmopolitan.git
[metal] Allow more fine-grained control over page permissions (#663)
- Use the PAGE_RSRV bit (originally only for blinkenlights), rather than the PAGE_V bit, to indicate that a virtual address page has been reserved. This allows a program to create and reserve inaccessible "guard pages".
- Mark page table entries for non-code pages with the PAGE_XD bit, which is supported on (roughly) post-2004 x86-64 CPUs.
This commit is contained in:
parent 0f89140882
commit d38700687a

5 changed files with 25 additions and 14 deletions
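To make the new flag semantics concrete, here is a minimal sketch of how a page table entry is assembled under this scheme. It is illustrative only, not code from this commit: the bit values follow the standard x86-64 PTE layout, the exact PAGE_RSRV position is an assumption (it occupies one of the OS-available bits), and make_pte is a hypothetical helper that mirrors the sys_mmap_metal logic in the diff below.

/* Illustrative sketch only (not code from this commit). */
#include <stdint.h>
#include <sys/mman.h> /* PROT_READ, PROT_WRITE, PROT_EXEC */

#define PAGE_V    0x0001ull             /* present: hardware may use the mapping */
#define PAGE_RW   0x0002ull             /* writable                              */
#define PAGE_U    0x0004ull             /* user-accessible                       */
#define PAGE_RSRV 0x0200ull             /* assumed OS-available "reserved" bit   */
#define PAGE_XD   0x8000000000000000ull /* execute-disable (needs EFER.NXE)      */

/* A reserved page always carries PAGE_RSRV; PAGE_V is set only when the
 * mapping is actually accessible, so a PROT_NONE guard page stays reserved
 * yet faults on any access. Non-executable pages additionally get PAGE_XD. */
static uint64_t make_pte(uint64_t page, int prot) {
  uint64_t pte = page | PAGE_RSRV | PAGE_U;
  if (prot & PROT_WRITE)
    pte |= PAGE_V | PAGE_RW;
  else if (prot & (PROT_READ | PROT_EXEC))
    pte |= PAGE_V;
  if (!(prot & PROT_EXEC)) pte |= PAGE_XD;
  return pte;
}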
@@ -1285,8 +1285,8 @@ lcheck: pushf                           # check for i8086 / i8088 / i80186
         jl      10f
         mov     %edi,%eax
         cpuid
-        mov     $1<<29,%edi                     # need nexgen32e long mode support
-        and     %edi,%edx
+        mov     $1<<29|1<<20,%edi               # need nexgen32e long mode support
+        and     %edi,%edx                       # & nx support
         cmp     %edi,%edx
         jne     10f
         xor     %ax,%ax
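For reference, the two feature bits required by the check above live in CPUID.80000001h:EDX. A minimal C sketch of the constants (bit positions per the Intel and AMD manuals; the macro names are illustrative, not from the codebase):

#define CPUID_EDX_NX (1u << 20) /* execute-disable (XD/NX) page support */
#define CPUID_EDX_LM (1u << 29) /* long mode ("nexgen32e") support      */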
@@ -1415,7 +1415,7 @@ pinit:  push    %ds
         movl    $0x79000+PAGE_V+PAGE_RW,0x7b000-SEG     # PD←PDT (+)
         movl    $0x79000+PAGE_V+PAGE_RW,0x7a000-SEG     # PD←PDT (-)
         mov     $512,%cx                                # PD±2MB
-        mov     $PAGE_V+PAGE_RW,%eax
+        mov     $PAGE_V+PAGE_RSRV+PAGE_RW,%eax
         xor     %di,%di
 0:      stosl
         add     $0x1000,%eax
@@ -1438,7 +1438,7 @@ golong: cli
         mov     %eax,%cr4
         movl    $EFER,%ecx
         rdmsr
-        or      $EFER_LME|EFER_SCE,%eax
+        or      $EFER_LME|EFER_SCE|EFER_NXE,%eax
         wrmsr
         lgdt    REAL(_gdtrphy)
         mov     %cr0,%eax
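The PAGE_XD bit in a page table entry is only honored once EFER.NXE is enabled, which is what the hunk above adds. A minimal sketch of the relevant EFER bits (MSR number and bit positions per the AMD64 manual; the names match the macros used in this diff):

#define EFER     0xC0000080u  /* extended feature enable register (MSR) */
#define EFER_SCE (1u << 0)    /* syscall/sysret enable                  */
#define EFER_LME (1u << 8)    /* long mode enable                       */
#define EFER_NXE (1u << 11)   /* no-execute enable                      */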
@@ -43,7 +43,7 @@ noasan struct DirectMap sys_mmap_metal(void *paddr, size_t size, int prot,
     addr = 4096;
     for (i = 0; i < size; i += 4096) {
       pte = __get_virtual(mm, pml4t, addr + i, false);
-      if (pte && (*pte & PAGE_V)) {
+      if (pte && (*pte & (PAGE_V | PAGE_RSRV))) {
         addr = MAX(addr, sys_mmap_metal_break) + i + 4096;
         i = 0;
       }
@@ -55,7 +55,13 @@ noasan struct DirectMap sys_mmap_metal(void *paddr, size_t size, int prot,
     pte = __get_virtual(mm, pml4t, addr + i, true);
     if (pte && page) {
       __clear_page(BANE + page);
-      *pte = page | ((prot & PROT_WRITE) ? PAGE_RW : 0) | PAGE_U | PAGE_V;
+      page |= PAGE_RSRV | PAGE_U;
+      if ((prot & PROT_WRITE))
+        page |= PAGE_V | PAGE_RW;
+      else if ((prot & (PROT_READ | PROT_EXEC)))
+        page |= PAGE_V;
+      if (!(prot & PROT_EXEC)) page |= PAGE_XD;
+      *pte = page;
     } else {
       addr = -1;
       break;
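The net effect for callers is that a PROT_NONE mapping now stays reserved without ever becoming accessible. A hypothetical user-level illustration (plain POSIX mmap usage, nothing specific to this commit's internals):

/* Hypothetical illustration: under the PAGE_RSRV scheme above, a PROT_NONE
 * mapping keeps its address range reserved (PAGE_RSRV set) while leaving
 * PAGE_V clear, so any access to the page faults, i.e. a guard page. */
#include <sys/mman.h>

void *reserve_guard_page(void) {
  return mmap(0, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}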
@@ -71,7 +71,7 @@ noasan textreal uint64_t *__get_virtual(struct mman *mm, uint64_t *t,
   for (h = 39;; h -= 9) {
     e = t + ((vaddr >> h) & 511);
     if (h == 12) return e;
-    if (!(*e & PAGE_V)) {
+    if (!(*e & (PAGE_V | PAGE_RSRV))) {
       if (!maketables) return NULL;
       if (!(p = __new_page(mm))) return NULL;
       __clear_page(BANE + p);
@@ -123,8 +123,8 @@ noasan textreal void __invert_memory_area(struct mman *mm, uint64_t *pml4t,
   pe = ROUNDUP(pe, 4096);
   for (p = ps; p != pe; p += 4096) {
     m = __get_virtual(mm, pml4t, BANE + p, true);
-    if (m && !(*m & PAGE_V)) {
-      *m = p | PAGE_V | pte_flags;
+    if (m && !(*m & (PAGE_V | PAGE_RSRV))) {
+      *m = p | PAGE_V | PAGE_RSRV | pte_flags;
     }
   }
 }
@@ -139,7 +139,7 @@ static noasan textreal void __invert_memory(struct mman *mm, uint64_t *pml4t) {
     /* ape/ape.S has already mapped the first 2 MiB of physical memory. */
     if (ps < 0x200000 && ps + size <= 0x200000)
       continue;
-    __invert_memory_area(mm, pml4t, ps, size, PAGE_RW);
+    __invert_memory_area(mm, pml4t, ps, size, PAGE_RW | PAGE_XD);
   }
 }
 
@@ -187,8 +187,12 @@ noasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b) {
   for (p = (struct Elf64_Phdr *)REAL(ape_phdrs), m = 0;
        p < (struct Elf64_Phdr *)REAL(ape_phdrs_end); ++p) {
     if (p->p_type == PT_LOAD || p->p_type == PT_GNU_STACK) {
-      f = PAGE_V | PAGE_U;
-      if (p->p_flags & PF_W) f |= PAGE_RW;
+      f = PAGE_RSRV | PAGE_U;
+      if (p->p_flags & PF_W)
+        f |= PAGE_V | PAGE_RW;
+      else if (p->p_flags & (PF_R | PF_X))
+        f |= PAGE_V;
+      if (!(p->p_flags & PF_X)) f |= PAGE_XD;
       for (i = 0; i < p->p_memsz; i += 4096) {
         if (i < p->p_filesz) {
           v = b + p->p_offset + i;
@@ -126,7 +126,7 @@ __msabi noasan EFI_STATUS EfiMain(EFI_HANDLE ImageHandle,
   pdpt2 = (uint64_t *)0x7c000;
   pml4t = (uint64_t *)0x7e000;
   for (i = 0; i < 512; ++i) {
-    pd[i] = 0x1000 * i + PAGE_V + PAGE_RW;
+    pd[i] = 0x1000 * i + PAGE_V + PAGE_RSRV + PAGE_RW;
   }
   pdt1[0] = (intptr_t)pd + PAGE_V + PAGE_RW;
   pdt2[0] = (intptr_t)pd + PAGE_V + PAGE_RW;
@@ -51,7 +51,8 @@ void _vga_reinit(struct Tty *tty, unsigned short starty, unsigned short startx,
   chr_ht = VGA_ASSUME_CHAR_HEIGHT_PX;
   chr_wid = VGA_ASSUME_CHAR_WIDTH_PX;
   /* Make sure the video buffer is mapped into virtual memory. */
-  __invert_memory_area(mm, __get_pml4t(), vid_buf_phy, vid_buf_sz, PAGE_RW);
+  __invert_memory_area(mm, __get_pml4t(), vid_buf_phy, vid_buf_sz,
+                       PAGE_RW | PAGE_XD);
   /*
    * Initialize our tty structure from the current screen geometry, screen
    * contents, cursor position, & character dimensions.