nub-os/src/arch/x86_64/mem/vmm.c

#include "vmm.h"
#include "pmm.h"
#include "x86_64/panic.h"
#define PML4_INDEX(addr) (((addr) >> 39) & 0x1FF)
#define PDPT_INDEX(addr) (((addr) >> 30) & 0x1FF)
#define PD_INDEX(addr) (((addr) >> 21) & 0x1FF)
#define PTE_MASK 0x000FFFFFFFFFF000ULL
#define PTE_PRESENT (1ULL << 0)
#define PTE_WRITABLE (1ULL << 1)
#define PTE_USER (1ULL << 2)
#define PTE_PS (1ULL << 7)
#define BITMAP_PAGE_COUNT (ADDRES_SPACE_SIZE / PAGE_SIZE)
#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8)
static u8 page_bitmap[BITMAP_SIZE];
extern u64 pml4[];
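
// Initializes the virtual address allocator by marking the bootloader-reserved
// low region (the first 32 pages) as used in the page bitmap. Panics if the
// bitmap cannot cover that region.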
void vmm_init()
{
    // Mark the first 32 pages (64 MiB with 2 MiB pages) as unusable since they are reserved by the bootloader
    // todo(nub31): This should be revisited. Maybe do a higher half kernel?
    for (u64 page = 0; page < 32; page++)
    {
        if (page < BITMAP_PAGE_COUNT)
        {
            if (!(page_bitmap[page / 8] & (1 << (page % 8))))
            {
                page_bitmap[page / 8] |= (1 << (page % 8));
            }
        }
        else
        {
            panic("Bitmap is not large enough to hold the bootloader reserved address space");
        }
    }
}
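
// Reserves a contiguous run of page_count pages in the virtual address bitmap
// using a first-fit scan. Returns the starting virtual address, or 0 if no
// free run of that size exists.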
u64 vmm_alloc_address(size_t page_count)
{
    size_t total_pages = BITMAP_PAGE_COUNT;
    // Reject empty and oversized requests up front; otherwise the loop bound
    // below (total_pages - page_count) would underflow.
    if (page_count == 0 || page_count > total_pages)
    {
        return 0;
    }
    for (size_t start_page = 0; start_page <= total_pages - page_count; start_page++)
    {
        bool found_block = true;
        for (size_t i = 0; i < page_count; i++)
        {
            size_t page = start_page + i;
            size_t byte_index = page / 8;
            size_t bit_index = page % 8;
            if (page_bitmap[byte_index] & (1 << bit_index))
            {
                // Run is interrupted; resume the scan past the used page
                found_block = false;
                start_page = page;
                break;
            }
        }
        if (found_block)
        {
            for (size_t i = 0; i < page_count; i++)
            {
                size_t page = start_page + i;
                size_t byte_index = page / 8;
                size_t bit_index = page % 8;
                page_bitmap[byte_index] |= (1 << bit_index);
            }
            return start_page * PAGE_SIZE;
        }
    }
    return 0;
}
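
// Releases a previously reserved run of pages back to the bitmap. Panics if
// the range falls outside the bitmap or if any page in it is already free.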
void vmm_free_address(u64 virtual_address, size_t page_count)
{
    u64 start_page = virtual_address / PAGE_SIZE;
    if (start_page + page_count > BITMAP_PAGE_COUNT)
    {
        printf("Virtual address range exceeds bitmap bounds\n");
        panic("Failed to free virtual address");
    }
    for (size_t i = 0; i < page_count; i++)
    {
        size_t page = start_page + i;
        size_t byte_index = page / 8;
        size_t bit_index = page % 8;
        if (!(page_bitmap[byte_index] & (1 << bit_index)))
        {
            printf("Virtual address 0x%llx (page %llu) is already free\n", virtual_address + (i * PAGE_SIZE), (u64)page);
            panic("Failed to free virtual address");
        }
    }
    for (size_t i = 0; i < page_count; i++)
    {
        size_t page = start_page + i;
        size_t byte_index = page / 8;
        size_t bit_index = page % 8;
        page_bitmap[byte_index] &= ~(1 << bit_index);
    }
}
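
// Builds a 2 MiB page-directory entry (PS bit set) for a page-aligned physical
// address. Panics if the address is not aligned to PAGE_SIZE.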
static u64 create_2mb_pte(u64 physical_address, u32 flags)
{
    if (physical_address & (PAGE_SIZE - 1))
    {
        printf("Physical address not page aligned (0x%llx)\n", physical_address);
        panic("Failed to create PTE");
    }
    return (physical_address & PTE_MASK) | flags | PTE_PS;
}
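
// Maps one 2 MiB virtual page to a physical page by installing a PS entry in
// the page directory. The PML4 and PDPT entries must already be present;
// missing intermediate tables and double mappings cause a panic.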
void vmm_map(u64 physical_address, u64 virtual_address, u32 flags)
{
    u64 pml4_idx = PML4_INDEX(virtual_address);
    u64 pdpt_idx = PDPT_INDEX(virtual_address);
    u64 pd_idx = PD_INDEX(virtual_address);
    u64 pdpt = pml4[pml4_idx];
    if (!(pdpt & PTE_PRESENT))
    {
        // todo(nub31): Dynamically create a pdpt table
        printf("PDPT not present at PML4 index %llu\n", pml4_idx);
        panic("Failed to map virtual to physical page");
    }
    u64* pdpt_physical_address = (u64*)(pdpt & PTE_MASK);
    u64 pd = pdpt_physical_address[pdpt_idx];
    if (!(pd & PTE_PRESENT))
    {
        // todo(nub31): Dynamically create a pd table
        printf("PD not present at PDPT index %llu\n", pdpt_idx);
        panic("Failed to map virtual to physical page");
    }
    u64* pd_physical_address = (u64*)(pd & PTE_MASK);
    u64 entry = pd_physical_address[pd_idx];
    if (entry & PTE_PRESENT)
    {
        printf("Virtual address 0x%llx is already mapped\n", virtual_address);
        panic("Failed to map virtual to physical page");
    }
    pd_physical_address[pd_idx] = create_2mb_pte(physical_address, flags);
}
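
// Removes the 2 MiB mapping for a virtual address, invalidates the TLB entry
// with invlpg, and returns the physical address that was mapped there. Panics
// if any level of the translation is not present.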
u64 vmm_unmap(u64 virtual_address)
{
    u64 pml4_idx = PML4_INDEX(virtual_address);
    u64 pdpt_idx = PDPT_INDEX(virtual_address);
    u64 pd_idx = PD_INDEX(virtual_address);
    u64 pdpt_entry = pml4[pml4_idx];
    if (!(pdpt_entry & PTE_PRESENT))
    {
        printf("PDPT not present at PML4 index %llu\n", pml4_idx);
        panic("Failed to unmap virtual address");
    }
    u64* pdpt_physical_address = (u64*)(pdpt_entry & PTE_MASK);
    u64 pd_entry = pdpt_physical_address[pdpt_idx];
    if (!(pd_entry & PTE_PRESENT))
    {
        printf("PD not present at PDPT index %llu\n", pdpt_idx);
        panic("Failed to unmap virtual address");
    }
    u64* pd_physical_address = (u64*)(pd_entry & PTE_MASK);
    if (!(pd_physical_address[pd_idx] & PTE_PRESENT))
    {
        printf("Virtual address 0x%llx is not mapped\n", virtual_address);
        panic("Failed to unmap virtual address");
    }
    u64 physical_address = pd_physical_address[pd_idx] & PTE_MASK;
    pd_physical_address[pd_idx] = 0;
    __asm__ volatile("invlpg (%0)" : : "r"(virtual_address) : "memory");
    return physical_address;
}
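
// Allocates page_count pages of virtual address space and backs each one with
// a freshly allocated physical page, mapped present and writable. Panics if
// either virtual address space or physical memory is exhausted.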
void* vmm_alloc(size_t page_count)
{
    u64 virtual_address = vmm_alloc_address(page_count);
    // vmm_alloc_address returns 0 when no free run of the requested size exists
    if (!virtual_address)
    {
        panic("Out of virtual address space");
    }
    for (size_t i = 0; i < page_count; i++)
    {
        u64 physical_address = pmm_alloc();
        if (!physical_address)
        {
            panic("Out of physical memory");
        }
        vmm_map(physical_address, virtual_address + (i * PAGE_SIZE), PTE_PRESENT | PTE_WRITABLE);
    }
    return (void*)virtual_address;
}
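
// Unmaps page_count pages starting at virtual_address, returns the backing
// physical pages to the PMM, and releases the virtual address range.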
void vmm_free(void* virtual_address, size_t page_count)
{
    for (size_t i = 0; i < page_count; i++)
    {
        u64 physical_address = vmm_unmap((u64)virtual_address + (i * PAGE_SIZE));
        pmm_free(physical_address);
    }
    vmm_free_address((u64)virtual_address, page_count);
}
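
// Example usage (illustrative sketch, not an actual call site in the kernel):
// assumes vmm_init() has already run during early boot and that PAGE_SIZE is
// the 2 MiB mapping granularity used throughout this file.
//
//     void* buffer = vmm_alloc(4);   // reserve and back 4 pages
//     // ... use buffer ...
//     vmm_free(buffer, 4);           // unmap and release them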