From 130e271461d4e9957ceee08628a7cb6e1aa3bb55 Mon Sep 17 00:00:00 2001
From: nub31
Date: Sat, 6 Sep 2025 20:22:51 +0200
Subject: [PATCH] Only identity map the start of the kernel

---
 src/arch/x86_64/boot/boot.asm | 16 ++++++++++++++--
 src/arch/x86_64/main.c        |  6 +++---
 src/arch/x86_64/mem/pmm.c     | 22 ++++++++--------------
 src/arch/x86_64/mem/pmm.h     |  2 +-
 src/arch/x86_64/mem/vmm.c     | 21 ++++++++-------------
 src/arch/x86_64/mem/vmm.h     |  2 +-
 6 files changed, 35 insertions(+), 34 deletions(-)

diff --git a/src/arch/x86_64/boot/boot.asm b/src/arch/x86_64/boot/boot.asm
index 16f370f..fb3db2d 100644
--- a/src/arch/x86_64/boot/boot.asm
+++ b/src/arch/x86_64/boot/boot.asm
@@ -1,6 +1,7 @@
 global _start
 global pml4
 extern x86_64_main
+extern kernel_end
 
 %define FLAGS 0b10
 %define MAGIC 0x1BADB002
@@ -43,6 +44,8 @@ section .data
     dd 0
 multiboot_magic:
     dd 0
+kernel_page_count:
+    dd 0
 
 section .text
 bits 32
@@ -98,10 +101,18 @@ section .text
     mov eax, pd
     or eax, 0x03
     mov [pdpt], eax
-    ; Map first 32 2mb pages for the kernel for a total of 64mb
+
+    ; Calculate how many 2mb pages we need to identity map
+    mov ecx, kernel_end
+    add ecx, 0x1FFFFF ; Round kernel_end up to the next 2mb boundary
+    shr ecx, 21       ; ecx now holds the required page count
+
+    ; Save the page count so we can pass it to C later
+    mov [kernel_page_count], ecx
+
+    ; Identity map from 0x0 up to kernel_end
     mov edi, pd
     mov eax, 0x83
-    mov ecx, 32
 .setup_pd:
     mov [edi], eax
     add eax, 0x200000
@@ -150,6 +161,7 @@ section .text
     ; Finally, we call in to c
     mov edi, [multiboot_magic]
     mov esi, [multiboot_info]
+    mov edx, [kernel_page_count]
     call x86_64_main
 .hang:
     hlt
diff --git a/src/arch/x86_64/main.c b/src/arch/x86_64/main.c
index 0225e4c..70a24b8 100644
--- a/src/arch/x86_64/main.c
+++ b/src/arch/x86_64/main.c
@@ -7,7 +7,7 @@
 #include "panic.h"
 #include "util.h"
 
-void x86_64_main(u32 magic, multiboot_info_t* info)
+void x86_64_main(u32 magic, multiboot_info_t* info, u32 kernel_page_count)
 {
     console_clear();
 
@@ -24,8 +24,8 @@
     idt_init();
     remap_pic();
 
-    pmm_init(info);
-    vmm_init();
+    pmm_init(kernel_page_count, info);
+    vmm_init(kernel_page_count);
 
     kernel_main();
 }
diff --git a/src/arch/x86_64/mem/pmm.c b/src/arch/x86_64/mem/pmm.c
index ffafa6a..20ab85a 100644
--- a/src/arch/x86_64/mem/pmm.c
+++ b/src/arch/x86_64/mem/pmm.c
@@ -23,7 +23,7 @@ static size_t num_regions = 0;
 
 static u8 page_bitmap[BITMAP_SIZE];
 
-void pmm_init(multiboot_info_t* info)
+void pmm_init(u32 kernel_page_count, multiboot_info_t* info)
 {
     if (!(info->flags & (1 << 6)))
     {
@@ -67,7 +67,7 @@
 
         for (u64 page = start_page; page < start_page + num_pages; page++)
         {
-            if (page < BITMAP_SIZE * 8)
+            if (page < BITMAP_PAGE_COUNT)
             {
                 page_bitmap[page / 8] &= ~(1 << (page % 8));
             }
@@ -79,21 +79,15 @@
         }
     }
 
-    // Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader
-    // todo(nub31): This should be revisited. Maybe do a higher half kernel?
-    for (u64 page = 0; page < 32; page++)
+    // The kernel was identity mapped during boot, so those pages should be marked as unavailable
+    for (u64 page = 0; page < kernel_page_count; page++)
     {
-        if (page < BITMAP_PAGE_COUNT)
+        if (page >= BITMAP_PAGE_COUNT)
         {
-            if (!(page_bitmap[page / 8] & (1 << (page % 8))))
-            {
-                page_bitmap[page / 8] |= (1 << (page % 8));
-            }
-        }
-        else
-        {
-            panic("Bitmap is not large enough to hold the bootloader reserved memory");
+            panic("Bitmap is not large enough to hold the memory reserved by the kernel");
         }
+
+        page_bitmap[page / 8] |= (1 << (page % 8));
     }
 }
 
diff --git a/src/arch/x86_64/mem/pmm.h b/src/arch/x86_64/mem/pmm.h
index ed2bcc9..b9e3d61 100644
--- a/src/arch/x86_64/mem/pmm.h
+++ b/src/arch/x86_64/mem/pmm.h
@@ -10,7 +10,7 @@
 // The value must be a multible of 8
 #define MAX_MEMORY GiB(64)
 
-void pmm_init(multiboot_info_t* info);
+void pmm_init(u32 kernel_page_count, multiboot_info_t* info);
 
 // Low level function to allocate a 2mb physical page and return the physical address
 // A return value 0 indicates out of memory
diff --git a/src/arch/x86_64/mem/vmm.c b/src/arch/x86_64/mem/vmm.c
index 4bc963f..3f0d621 100644
--- a/src/arch/x86_64/mem/vmm.c
+++ b/src/arch/x86_64/mem/vmm.c
@@ -20,23 +20,18 @@ static u8 page_bitmap[BITMAP_SIZE];
 
 extern u64 pml4[];
 
-void vmm_init()
+void vmm_init(u32 kernel_page_count)
 {
-    // Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader
-    // todo(nub31): This should be revisited. Maybe do a higher half kernel?
-    for (u64 page = 0; page < 32; page++)
+    // The kernel was identity mapped when we entered long mode,
+    // so those virtual addresses should be marked as unavailable
+    for (u64 page = 0; page < kernel_page_count; page++)
     {
-        if (page < BITMAP_PAGE_COUNT)
+        if (page >= BITMAP_PAGE_COUNT)
         {
-            if (!(page_bitmap[page / 8] & (1 << (page % 8))))
-            {
-                page_bitmap[page / 8] |= (1 << (page % 8));
-            }
-        }
-        else
-        {
-            panic("Bitmap is not large enough to hold the bootloader reserved address space");
+            panic("Bitmap is not large enough to hold the addresses reserved by the kernel");
         }
+
+        page_bitmap[page / 8] |= (1 << (page % 8));
     }
 }
 
diff --git a/src/arch/x86_64/mem/vmm.h b/src/arch/x86_64/mem/vmm.h
index 7696254..61a610c 100644
--- a/src/arch/x86_64/mem/vmm.h
+++ b/src/arch/x86_64/mem/vmm.h
@@ -6,7 +6,7 @@
 // The value must be a multible of 8
 #define ADDRES_SPACE_SIZE GiB(64)
 
-void vmm_init();
+void vmm_init(u32 kernel_page_count);
 
 // Allocates a free page aligned block of virtual addresses
 // A return value 0 indicates that there were not blocks
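
For reference, the page count computed in boot.asm is plain round-up division: kernel_end is rounded up to the next 2mb boundary, then shifted right by 21 (a 2mb page is 2^21 bytes). Below is a minimal C sketch of the same arithmetic; the standalone main() and the sample kernel_end values are purely illustrative and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    // A 2mb large page, matching the PS bit set in the PD entries (flags 0x83)
    #define PAGE_SIZE_2MB (1ULL << 21)

    // Mirrors the asm sequence: add ecx, 0x1FFFFF / shr ecx, 21
    static uint64_t pages_to_identity_map(uint64_t kernel_end)
    {
        return (kernel_end + PAGE_SIZE_2MB - 1) >> 21;
    }

    int main(void)
    {
        // Sample kernel_end values for illustration only
        printf("%llu\n", (unsigned long long)pages_to_identity_map(0x200000)); // exactly 2mb   -> 1 page
        printf("%llu\n", (unsigned long long)pages_to_identity_map(0x200001)); // 2mb + 1 byte  -> 2 pages
        printf("%llu\n", (unsigned long long)pages_to_identity_map(0x6AB000)); // ~6.7mb kernel -> 4 pages
        return 0;
    }

One thing this arithmetic makes visible: the .setup_pd loop fills a single page directory, which holds 512 2mb entries (1gb total), so the scheme assumes kernel_end stays well below 1gb.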