[metal] Allow munmap() to reclaim dynamically allocated pages

tkchia 2022-11-12 23:14:43 +00:00
parent dfd7f9aa1b
commit de295966b1
5 changed files with 92 additions and 22 deletions

View file

@@ -21,13 +21,18 @@
 noasan int sys_munmap_metal(void *addr, size_t size) {
   size_t i;
-  uint64_t *e;
+  uint64_t *e, paddr;
   struct mman *mm;
+  uint64_t *pml4t = __get_pml4t();
   mm = (struct mman *)(BANE + 0x0500);
   for (i = 0; i < size; i += 4096) {
-    e = __get_virtual(mm, __get_pml4t(), (uint64_t)addr + i, false);
-    if (e) *e &= ~(PAGE_V | PAGE_RSRV);
-    invlpg((uint64_t)addr + i);
+    e = __get_virtual(mm, pml4t, (uint64_t)addr + i, false);
+    if (e) {
+      paddr = *e & PAGE_TA;
+      *e &= ~(PAGE_V | PAGE_RSRV);
+      invlpg((uint64_t)addr + i);
+      __unref_page(mm, pml4t, paddr);
+    }
   }
   return 0;
 }

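Taken together with the mmap changes below, this gives anonymous mappings on bare metal a full life cycle: the frame address is captured from the PTE before the entry is cleared, the stale TLB entry is flushed, and only then is the reference dropped, so the frame can safely reappear on the free list. A minimal round-trip sketch (hypothetical caller code, assuming Cosmopolitan's mmap()/munmap() route to these metal backends when booted on bare metal):

    #include <sys/mman.h>

    /* Hypothetical illustration: after munmap(), the backing frame sits on
       the mman free list (mm->frp) and may be handed out again by a later
       mmap() on bare metal. */
    int demo(void) {
      char *p = mmap(0, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return -1;
      p[0] = 42;              /* touch the page */
      return munmap(p, 4096); /* drops the frame's last reference */
    }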
View file

@@ -44,7 +44,7 @@ noasan struct DirectMap sys_mmap_metal(void *vaddr, size_t size, int prot,
   size_t i;
   struct mman *mm;
   struct DirectMap res;
-  uint64_t addr, faddr = 0, page, *pte, *fdpte, *pml4t;
+  uint64_t addr, faddr = 0, page, e, *pte, *fdpte, *pml4t;
   mm = (struct mman *)(BANE + 0x0500);
   pml4t = __get_pml4t();
   size = ROUNDUP(size, 4096);
@@ -78,27 +78,27 @@ noasan struct DirectMap sys_mmap_metal(void *vaddr, size_t size, int prot,
       sys_mmap_metal_break = MAX(addr + size, sys_mmap_metal_break);
     }
     for (i = 0; i < size; i += 4096) {
-      page = __new_page(mm);
       pte = __get_virtual(mm, pml4t, addr + i, true);
       if (pte) {
         if ((flags & MAP_ANONYMOUS_linux)) {
+          page = __new_page(mm);
+          if (!page) return bad_mmap();
           __clear_page(BANE + page);
-          page |= PAGE_RSRV | PAGE_U;
+          e = page | PAGE_RSRV | PAGE_U;
           if ((prot & PROT_WRITE))
-            page |= PAGE_V | PAGE_RW;
+            e |= PAGE_V | PAGE_RW;
           else if ((prot & (PROT_READ | PROT_EXEC)))
-            page |= PAGE_V;
-          if (!(prot & PROT_EXEC)) page |= PAGE_XD;
+            e |= PAGE_V;
+          if (!(prot & PROT_EXEC)) e |= PAGE_XD;
         } else {
           fdpte = __get_virtual(mm, pml4t, faddr + i, false);
-          page = *fdpte;
-          page |= PAGE_RSRV | PAGE_U;
-          if (!(prot & PROT_WRITE)) page &= ~PAGE_RW;
-          if (!(prot & PROT_EXEC)) page |= PAGE_XD;
+          e = *fdpte | PAGE_RSRV | PAGE_U;
+          page = e & PAGE_TA;
+          if (!(prot & PROT_WRITE)) e &= ~PAGE_RW;
+          if (!(prot & PROT_EXEC)) e |= PAGE_XD;
         }
-        *pte = page;
+        __ref_page(mm, pml4t, page);
+        *pte = e;
         invlpg(addr + i);
       } else {
         addr = -1;

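The loop above now threads two values where the old code had one: e is the full PTE image (frame address plus permission bits), while page is the bare frame address, e & PAGE_TA, which is what the reference-counting helpers key on. A minimal sketch of the distinction, assuming the PAGE_* constants and helpers from this commit are in scope:

    /* Illustrative only: a PTE image versus the frame address inside it. */
    uint64_t frame = __new_page(mm);      /* 4096-aligned physical address  */
    uint64_t e = frame | PAGE_V | PAGE_RW | PAGE_U | PAGE_RSRV;
    uint64_t pa = e & PAGE_TA;            /* == frame; flag bits masked off */
    __ref_page(mm, pml4t, pa);            /* count keyed by frame, not PTE  */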
View file

@@ -133,10 +133,11 @@ static noasan textreal void __normalize_e820(struct mman *mm, uint64_t top) {
 /**
  * Identity maps an area of physical memory to its negative address.
  */
-noasan textreal void __invert_memory_area(struct mman *mm, uint64_t *pml4t,
-                                          uint64_t ps, uint64_t size,
-                                          uint64_t pte_flags) {
-  uint64_t pe = ps + size, p, *m;
+noasan textreal uint64_t *__invert_memory_area(struct mman *mm,
+                                               uint64_t *pml4t,
+                                               uint64_t ps, uint64_t size,
+                                               uint64_t pte_flags) {
+  uint64_t pe = ps + size, p, *m = NULL;
   ps = ROUNDDOWN(ps, 4096);
   pe = ROUNDUP(pe, 4096);
   for (p = ps; p != pe; p += 4096) {
@@ -145,6 +146,62 @@ noasan textreal void __invert_memory_area(struct mman *mm, uint64_t *pml4t,
       *m = p | PAGE_V | PAGE_RSRV | pte_flags;
     }
   }
+  return m;
 }
+
+/**
+ * Increments the reference count for a page of physical memory.
+ */
+noasan void __ref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
+  uint64_t *m, e;
+  m = __invert_memory_area(mm, pml4t, p, 4096, PAGE_RW | PAGE_XD);
+  if (m) {
+    e = *m;
+    if ((e & PAGE_REFC) != PAGE_REFC) {
+      e += PAGE_1REF;
+      *m = e;
+    }
+  }
+}
+
+/**
+ * Increments the reference counts for an area of physical memory.
+ */
+noasan void __ref_pages(struct mman *mm, uint64_t *pml4t, uint64_t ps,
+                        uint64_t size) {
+  uint64_t p = ROUNDDOWN(ps, 4096), e = ROUNDUP(ps + size, 4096);
+  while (p != e) {
+    __ref_page(mm, pml4t, p);
+    p += 4096;
+  }
+}
+
+/**
+ * Reclaims a page of physical memory for later use.
+ */
+static noasan void __reclaim_page(struct mman *mm, uint64_t p) {
+  struct ReclaimedPage *rp = (struct ReclaimedPage *)(BANE + p);
+  _unassert(p == (p & PAGE_TA));
+  rp->next = mm->frp;
+  mm->frp = p;
+}
+
+/**
+ * Decrements the reference count for a page of physical memory. Frees the
+ * page if there are no virtual addresses (excluding the negative space)
+ * referring to it.
+ */
+noasan void __unref_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
+  uint64_t *m, e;
+  m = __invert_memory_area(mm, pml4t, p, 4096, PAGE_RW | PAGE_XD);
+  if (m) {
+    e = *m;
+    if ((e & PAGE_REFC) != PAGE_REFC) {
+      e -= PAGE_1REF;
+      *m = e;
+      if ((e & PAGE_REFC) == 0) __reclaim_page(mm, p);
+    }
+  }
+}
+
 /**
@@ -224,6 +281,7 @@ noasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b,
           v = __clear_page(BANE + __new_page(mm));
         }
         *__get_virtual(mm, pml4t, p->p_vaddr + i, true) = (v & PAGE_TA) | f;
+        __ref_page(mm, pml4t, v & PAGE_TA);
       }
     }
   }
@@ -231,7 +289,7 @@ noasan textreal void __map_phdrs(struct mman *mm, uint64_t *pml4t, uint64_t b,
 }
 
 /**
- * Reclaim memory pages which were used at boot time but which can now be
+ * Reclaims memory pages which were used at boot time but which can now be
  * made available for the application.
  */
 noasan textreal void __reclaim_boot_pages(struct mman *mm, uint64_t skip_start,

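__reclaim_page() above builds an intrusive singly linked free list: the struct ReclaimedPage header is written into the freed frame itself, addressed through its negative BANE alias, and mm->frp holds the physical address of the list head. Note also the saturation guard shared by __ref_page() and __unref_page(): once a count reaches the all-ones PAGE_REFC value it is never changed again, so such a page is effectively pinned. A minimal sketch of how an allocator could pop this list, assuming 0 terminates it (the real __new_page() in this tree may differ):

    /* Hypothetical companion to __reclaim_page(): pop the most recently
       reclaimed frame, or return 0 when the list is empty. */
    static noasan uint64_t pop_reclaimed_page(struct mman *mm) {
      uint64_t p = mm->frp;
      if (p) {
        struct ReclaimedPage *rp = (struct ReclaimedPage *)(BANE + p);
        mm->frp = rp->next; /* unlink the head frame */
      }
      return p; /* physical address of a free 4096-byte frame */
    }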
View file

@@ -167,6 +167,9 @@
 #define PAGE_GROD /* blinkenlights MAP_GROWSDOWN */ 0b010000000000
 #define PAGE_TA   0x00007ffffffff000
 #define PAGE_PA2  0x00007fffffe00000
+#define PAGE_IGN2 0x07f0000000000000
+#define PAGE_REFC PAGE_IGN2          /* libc reference counting */
+#define PAGE_1REF 0x0010000000000000 /* libc reference counting */
 #define PAGE_XD   0x8000000000000000
 
 #if !(__ASSEMBLER__ + __LINKER__ + 0)
@@ -186,10 +189,13 @@ struct IdtDescriptor {
 uint64_t *__get_virtual(struct mman *, uint64_t *, int64_t, bool);
 uint64_t __clear_page(uint64_t);
 uint64_t __new_page(struct mman *);
-void __invert_memory_area(struct mman *, uint64_t *, uint64_t, uint64_t,
-                          uint64_t);
+uint64_t *__invert_memory_area(struct mman *, uint64_t *, uint64_t, uint64_t,
+                               uint64_t);
 void __map_phdrs(struct mman *, uint64_t *, uint64_t, uint64_t);
 void __reclaim_boot_pages(struct mman *, uint64_t, uint64_t);
+void __ref_page(struct mman *, uint64_t *, uint64_t);
+void __ref_pages(struct mman *, uint64_t *, uint64_t, uint64_t);
+void __unref_page(struct mman *, uint64_t *, uint64_t);
 
 forceinline unsigned char inb(unsigned short port) {
   unsigned char al;

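These constants place the reference count in PTE bits that the x86-64 MMU ignores: PAGE_IGN2 covers bits 52-58, and PAGE_1REF is bit 52 (0x0010000000000000 == 1 << 52), so the count is a 7-bit field holding 0 through 127, with 127 (all bits of PAGE_REFC set) reserved as the sticky saturated state. A small sketch of the encoding, using only the defines above (helper names are illustrative, not part of this commit):

    /* Extract the 7-bit count packed into a PTE's ignored bits. */
    static unsigned pte_refcount(uint64_t e) {
      return (unsigned)((e & PAGE_REFC) / PAGE_1REF); /* 0..127 */
    }

    /* Saturating increment, mirroring the guard in __ref_page(). */
    static uint64_t pte_ref(uint64_t e) {
      return (e & PAGE_REFC) == PAGE_REFC ? e : e + PAGE_1REF;
    }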
View file

@@ -53,6 +53,7 @@ void _vga_reinit(struct Tty *tty, unsigned short starty, unsigned short startx,
   /* Make sure the video buffer is mapped into virtual memory. */
   __invert_memory_area(mm, __get_pml4t(), vid_buf_phy, vid_buf_sz,
                        PAGE_RW | PAGE_XD);
+  __ref_pages(mm, __get_pml4t(), vid_buf_phy, vid_buf_sz);
   /*
    * Initialize our tty structure from the current screen geometry, screen
    * contents, cursor position, & character dimensions.
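Taking a reference on the frame-buffer pages up front keeps __unref_page() from ever treating video memory as reclaimable RAM; without it, unmapping a mapping that aliases these frames could drive their count to zero and push MMIO addresses onto mm->frp. Memory that must never be recycled could likewise be pinned outright by saturating its count, a hypothetical sketch built on this commit's helpers:

    /* Hypothetical: saturate a page's count so it is permanently pinned
       (both __ref_page() and __unref_page() skip counts == PAGE_REFC). */
    static void pin_page(struct mman *mm, uint64_t *pml4t, uint64_t p) {
      int i;
      for (i = 0; i < 127; ++i) { /* 127 == PAGE_REFC / PAGE_1REF */
        __ref_page(mm, pml4t, p);
      }
    }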