Compare commits

...

2 Commits

Author SHA1 Message Date
nub31
130e271461 only identity map the start of the kernel 2025-09-06 20:22:51 +02:00
nub31
46bc977104 .. 2025-09-06 19:45:33 +02:00
7 changed files with 92 additions and 61 deletions

View File

@@ -1,6 +1,7 @@
global _start global _start
global pml4 global pml4
extern x86_64_main extern x86_64_main
extern kernel_end
%define FLAGS 0b10 %define FLAGS 0b10
%define MAGIC 0x1BADB002 %define MAGIC 0x1BADB002
@@ -43,6 +44,8 @@ section .data
dd 0 dd 0
multiboot_magic: multiboot_magic:
dd 0 dd 0
kernel_page_count:
dd 0
section .text section .text
bits 32 bits 32
@@ -98,10 +101,18 @@ section .text
mov eax, pd mov eax, pd
or eax, 0x03 or eax, 0x03
mov [pdpt], eax mov [pdpt], eax
; Map first 32 2mb pages for the kernel for a total of 64mb
; Calculate how many 2mb pages we need to identity map
mov ecx, kernel_end
add ecx, 0x1FFFFF ; Page align end of kernel
shr ecx, 21 ; ecx now holds the required pages
; Save the page count so we can pass it to c later
mov [kernel_page_count], ecx
; Identity map from 0x0 to kernel_end
mov edi, pd mov edi, pd
mov eax, 0x83 mov eax, 0x83
mov ecx, 32
.setup_pd: .setup_pd:
mov [edi], eax mov [edi], eax
add eax, 0x200000 add eax, 0x200000
@@ -150,6 +161,7 @@ section .text
; Finally, we call in to c ; Finally, we call in to c
mov edi, [multiboot_magic] mov edi, [multiboot_magic]
mov esi, [multiboot_info] mov esi, [multiboot_info]
mov edx, [kernel_page_count]
call x86_64_main call x86_64_main
.hang: .hang:
hlt hlt

View File

@@ -7,7 +7,7 @@
#include "panic.h" #include "panic.h"
#include "util.h" #include "util.h"
void x86_64_main(u32 magic, multiboot_info_t* info) void x86_64_main(u32 magic, multiboot_info_t* info, u32 kernel_page_count)
{ {
console_clear(); console_clear();
@@ -23,10 +23,9 @@ void x86_64_main(u32 magic, multiboot_info_t* info)
idt_init(); idt_init();
remap_pic(); remap_pic();
enable_interrupts();
pmm_init(info); pmm_init(kernel_page_count, info);
vmm_init(); vmm_init(kernel_page_count);
kernel_main(); kernel_main();
} }

View File

@@ -22,9 +22,8 @@ static size_t num_regions = 0;
#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8) #define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8)
static u8 page_bitmap[BITMAP_SIZE]; static u8 page_bitmap[BITMAP_SIZE];
static u64 total_pages = 0;
void pmm_init(multiboot_info_t* info) void pmm_init(u32 kernel_page_count, multiboot_info_t* info)
{ {
if (!(info->flags & (1 << 6))) if (!(info->flags & (1 << 6)))
{ {
@@ -68,10 +67,9 @@ void pmm_init(multiboot_info_t* info)
for (u64 page = start_page; page < start_page + num_pages; page++) for (u64 page = start_page; page < start_page + num_pages; page++)
{ {
if (page < BITMAP_SIZE * 8) if (page < BITMAP_PAGE_COUNT)
{ {
page_bitmap[page / 8] &= ~(1 << (page % 8)); page_bitmap[page / 8] &= ~(1 << (page % 8));
total_pages++;
} }
else else
{ {
@@ -81,21 +79,15 @@ void pmm_init(multiboot_info_t* info)
} }
} }
// Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader // The kernel was identity mapped by the bootloader, so those bits should be marked as unavailable
// todo(nub31): This should be revisited. Maybe do a higher half kernel? for (u64 page = 0; page < kernel_page_count; page++)
for (u64 page = 0; page < 32; page++)
{ {
if (page < BITMAP_PAGE_COUNT) if (page >= BITMAP_PAGE_COUNT)
{ {
if (!(page_bitmap[page / 8] & (1 << (page % 8)))) panic("Bitmap is not large enough to hold the memory reserved by the kernel");
{
page_bitmap[page / 8] |= (1 << (page % 8));
}
}
else
{
panic("Bitmap is not large enough to hold the bootloader reserved memory");
} }
page_bitmap[page / 8] |= (1 << (page % 8));
} }
} }

View File

@@ -10,10 +10,10 @@
// The value must be a multiple of 8 // The value must be a multiple of 8
#define MAX_MEMORY GiB(64) #define MAX_MEMORY GiB(64)
void pmm_init(multiboot_info_t* info); void pmm_init(u32 kernel_page_count, multiboot_info_t* info);
// Low level function to allocate a 2mb physical page and return the physical address // Low level function to allocate a 2mb physical page and return the physical address
// Return value 0 indicates out of memory // A return value 0 indicates out of memory
u64 pmm_alloc(); u64 pmm_alloc();
// Low level function to free a 2mb physical page // Low level function to free a 2mb physical page

View File

@@ -17,68 +17,92 @@
#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8) #define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8)
static u8 page_bitmap[BITMAP_SIZE]; static u8 page_bitmap[BITMAP_SIZE];
static u64 total_pages = 0;
static u64 free_pages = 0;
extern u64 pml4[]; extern u64 pml4[];
void vmm_init() void vmm_init(u32 kernel_page_count)
{ {
// Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader // The kernel was identity mapped when we entered long mode,
// todo(nub31): This should be revisited. Maybe do a higher half kernel? // so those virtual addresses should be marked as unavailable
for (u64 page = 0; page < 32; page++) for (u64 page = 0; page < kernel_page_count; page++)
{ {
if (page < BITMAP_PAGE_COUNT) if (page >= BITMAP_PAGE_COUNT)
{ {
if (!(page_bitmap[page / 8] & (1 << (page % 8)))) panic("Bitmap is not large enough to hold the addresses reserved by the kernel");
{
page_bitmap[page / 8] |= (1 << (page % 8));
}
}
else
{
panic("Bitmap is not large enough to hold the bootloader reserved memory");
} }
page_bitmap[page / 8] |= (1 << (page % 8));
} }
} }
u64 vmm_alloc_address(size_t page_count) u64 vmm_alloc_address(size_t page_count)
{ {
for (size_t i = 0; i < BITMAP_SIZE; i++) size_t total_pages = BITMAP_PAGE_COUNT;
for (size_t start_page = 0; start_page <= total_pages - page_count; start_page++)
{ {
if (page_bitmap[i] != 0xFF) bool found_block = true;
for (size_t i = 0; i < page_count; i++)
{ {
for (int bit = 0; bit < 8; bit++) size_t page = start_page + i;
size_t byte_index = page / 8;
size_t bit_index = page % 8;
if (page_bitmap[byte_index] & (1 << bit_index))
{ {
if (!(page_bitmap[i] & (1 << bit))) found_block = false;
{ start_page = page;
page_bitmap[i] |= (1 << bit); break;
free_pages--;
return ((i * 8 + bit) * PAGE_SIZE);
}
} }
} }
if (found_block)
{
for (size_t i = 0; i < page_count; i++)
{
size_t page = start_page + i;
size_t byte_index = page / 8;
size_t bit_index = page % 8;
page_bitmap[byte_index] |= (1 << bit_index);
}
return start_page * PAGE_SIZE;
}
} }
return 0; return 0;
} }
u64 vmm_free_address(u64 virtual_address, size_t page_count) void vmm_free_address(u64 virtual_address, size_t page_count)
{ {
u64 page = virtual_address / PAGE_SIZE; u64 start_page = virtual_address / PAGE_SIZE;
if (page < BITMAP_SIZE * 8) if (start_page + page_count > BITMAP_PAGE_COUNT)
{ {
if (page_bitmap[page / 8] & (1 << (page % 8))) printf("Virtual address range exceeds bitmap bounds\n");
panic("Failed to free virtual address");
}
for (size_t i = 0; i < page_count; i++)
{
size_t page = start_page + i;
size_t byte_index = page / 8;
size_t bit_index = page % 8;
if (!(page_bitmap[byte_index] & (1 << bit_index)))
{ {
page_bitmap[page / 8] &= ~(1 << (page % 8)); printf("Virtual address 0x%x (page %u) is already free\n", virtual_address + (i * PAGE_SIZE), page);
free_pages++;
}
else
{
printf("Virtual address %x is already free", virtual_address);
panic("Failed to free virtual address"); panic("Failed to free virtual address");
} }
} }
for (size_t i = 0; i < page_count; i++)
{
size_t page = start_page + i;
size_t byte_index = page / 8;
size_t bit_index = page % 8;
page_bitmap[byte_index] &= ~(1 << bit_index);
}
} }
static u64 create_2mb_pte(u64 physical_address, u32 flags) static u64 create_2mb_pte(u64 physical_address, u32 flags)

View File

@@ -2,15 +2,19 @@
#include "std.h" #include "std.h"
// Defines the theoretical max virtual memory space the kernel can allocate
// The value must be a multiple of 8
#define ADDRES_SPACE_SIZE GiB(64) #define ADDRES_SPACE_SIZE GiB(64)
void vmm_init(); void vmm_init(u32 kernel_page_count);
// Allocates a free page aligned block of virtual addresses // Allocates a free page aligned block of virtual addresses
// A return value of 0 indicates that no free block
// large enough for the requested number of pages was found
u64 vmm_alloc_address(size_t page_count); u64 vmm_alloc_address(size_t page_count);
// Frees a block of virtual addresses previously allocated via `vmm_alloc_address` // Frees a block of virtual addresses previously allocated via `vmm_alloc_address`
// Only use this function for pages mapped via `vmm_alloc_address` // Only use this function for pages mapped via `vmm_alloc_address`
u64 vmm_free_address(u64 virtual_address, size_t page_count); void vmm_free_address(u64 virtual_address, size_t page_count);
// Low level function to map a virtual address to a physical address // Low level function to map a virtual address to a physical address
void vmm_map(u64 physical_address, u64 virtual_address, u32 flags); void vmm_map(u64 physical_address, u64 virtual_address, u32 flags);

View File

@@ -4,8 +4,8 @@
void kernel_main() void kernel_main()
{ {
arch_api.enable_interrupts();
printf("Welcome to nub OS :)\n"); printf("Welcome to nub OS :)\n");
printf("Kernel has exited\n"); printf("Kernel has exited\n");
arch_api.halt(); arch_api.halt();
} }