From 5f071104bccb52df29e9e00c322022f09caab31f Mon Sep 17 00:00:00 2001 From: nub31 Date: Sat, 6 Sep 2025 18:32:52 +0200 Subject: [PATCH] virtual page allocation --- src/arch/x86_64/main.c | 3 +- src/arch/x86_64/mem/pmm.c | 9 ++--- src/arch/x86_64/mem/vmm.c | 76 +++++++++++++++++++++++++++++++++------ src/arch/x86_64/mem/vmm.h | 12 ++++--- 4 files changed, 77 insertions(+), 23 deletions(-) diff --git a/src/arch/x86_64/main.c b/src/arch/x86_64/main.c index 8c7de39..34c9382 100644 --- a/src/arch/x86_64/main.c +++ b/src/arch/x86_64/main.c @@ -26,8 +26,7 @@ void x86_64_main(u32 magic, multiboot_info_t* info) enable_interrupts(); pmm_init(info); - void* mem = vmm_alloc(1); - vmm_free(mem, 1); + vmm_init(); kernel_main(); } diff --git a/src/arch/x86_64/mem/pmm.c b/src/arch/x86_64/mem/pmm.c index 8c4c393..d448bae 100644 --- a/src/arch/x86_64/mem/pmm.c +++ b/src/arch/x86_64/mem/pmm.c @@ -23,7 +23,6 @@ static size_t num_regions = 0; static u8 page_bitmap[BITMAP_SIZE]; static u64 total_pages = 0; -static u64 free_pages = 0; void pmm_init(multiboot_info_t* info) { @@ -72,7 +71,6 @@ void pmm_init(multiboot_info_t* info) if (page < BITMAP_SIZE * 8) { page_bitmap[page / 8] &= ~(1 << (page % 8)); - free_pages++; total_pages++; } else @@ -92,7 +90,6 @@ void pmm_init(multiboot_info_t* info) if (!(page_bitmap[page / 8] & (1 << (page % 8)))) { page_bitmap[page / 8] |= (1 << (page % 8)); - free_pages--; } } else @@ -113,7 +110,6 @@ u64 pmm_alloc() if (!(page_bitmap[i] & (1 << bit))) { page_bitmap[i] |= (1 << bit); - free_pages--; return ((i * 8 + bit) * PAGE_SIZE); } } @@ -131,12 +127,11 @@ void pmm_free(u64 physical_address) if (page_bitmap[page / 8] & (1 << (page % 8))) { page_bitmap[page / 8] &= ~(1 << (page % 8)); - free_pages++; } else { - printf("pmm_free: Physical address %x is already free", physical_address); - panic("pmm_free: Failed to free"); + printf("Physical address %x is already free", physical_address); + panic("Failed to free physical address"); } } } \ No newline at 
end of file diff --git a/src/arch/x86_64/mem/vmm.c b/src/arch/x86_64/mem/vmm.c index cabfaca..26c7b1c 100644 --- a/src/arch/x86_64/mem/vmm.c +++ b/src/arch/x86_64/mem/vmm.c @@ -13,8 +13,74 @@ #define PTE_USER (1ULL << 2) #define PTE_PS (1ULL << 7) +#define BITMAP_PAGE_COUNT (ADDRES_SPACE_SIZE / PAGE_SIZE) +#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8) + +static u8 page_bitmap[BITMAP_SIZE]; +static u64 total_pages = 0; +static u64 free_pages = 0; + extern u64 pml4[]; +void vmm_init() +{ + // Mark first 32 pages (64 MB) as unusable since they are reserved by the bootloader + // todo(nub31): This should be revisited. Maybe do a higher half kernel? + for (u64 page = 0; page < 32; page++) + { + if (page < BITMAP_PAGE_COUNT) + { + if (!(page_bitmap[page / 8] & (1 << (page % 8)))) + { + page_bitmap[page / 8] |= (1 << (page % 8)); + } + } + else + { + panic("Bitmap is not large enough to hold the bootloader reserved memory"); + } + } +} + +u64 vmm_alloc_address(size_t page_count) +{ + for (size_t i = 0; i < BITMAP_SIZE; i++) + { + if (page_bitmap[i] != 0xFF) + { + for (int bit = 0; bit < 8; bit++) + { + if (!(page_bitmap[i] & (1 << bit))) + { + page_bitmap[i] |= (1 << bit); + free_pages--; + return ((i * 8 + bit) * PAGE_SIZE); + } + } + } + } + + return 0; +} + +u64 vmm_free_address(u64 virtual_address, size_t page_count) +{ + u64 page = virtual_address / PAGE_SIZE; + if (page < BITMAP_SIZE * 8) + { + if (page_bitmap[page / 8] & (1 << (page % 8))) + { + page_bitmap[page / 8] &= ~(1 << (page % 8)); + free_pages++; + } + else + { + printf("Virtual address %x is already free", virtual_address); + panic("Failed to free virtual address"); + } + } +} + static u64 create_2mb_pte(u64 physical_address, u32 flags) { if (physical_address & (PAGE_SIZE - 1)) @@ -97,16 +163,6 @@ u64 vmm_unmap(u64 virtual_address) return physical_address; } -u64 vmm_alloc_address(size_t page_count) -{ - panic("not implemented"); -} - -u64 vmm_free_address(u64 virtual_address, size_t page_count) -{ - panic("not
implemented"); -} - void* vmm_alloc(size_t page_count) { u64 virtual_address = vmm_alloc_address(page_count); diff --git a/src/arch/x86_64/mem/vmm.h b/src/arch/x86_64/mem/vmm.h index 88aee05..d437fe5 100644 --- a/src/arch/x86_64/mem/vmm.h +++ b/src/arch/x86_64/mem/vmm.h @@ -2,10 +2,9 @@ #include "std.h" -// Low level function to map a virtual address to a physical address -void vmm_map(u64 physical_address, u64 virtual_address, u32 flags); -// Low level function to unmap a virtual address from a physical address -u64 vmm_unmap(u64 virtual_address); +#define ADDRES_SPACE_SIZE GiB(64) + +void vmm_init(); // Allocates a free page aligned block of virtual addresses u64 vmm_alloc_address(size_t page_count); @@ -13,6 +12,11 @@ u64 vmm_alloc_address(size_t page_count); // Only use this function for pages mapped via `vmm_alloc_address` u64 vmm_free_address(u64 virtual_address, size_t page_count); +// Low-level function to map a virtual address to a physical address +void vmm_map(u64 physical_address, u64 virtual_address, u32 flags); +// Low-level function to unmap a virtual address from a physical address +u64 vmm_unmap(u64 virtual_address); + // Allocates and maps `page_count` continuous pages and returns the virtual address of the first page void* vmm_alloc(size_t page_count); // Frees the pages allocated via `vmm_alloc` at the specified virtual address