Compare commits
2 Commits
5f071104bc
...
130e271461
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
130e271461 | ||
|
|
46bc977104 |
@@ -1,6 +1,7 @@
|
||||
global _start
|
||||
global pml4
|
||||
extern x86_64_main
|
||||
extern kernel_end
|
||||
|
||||
%define FLAGS 0b10
|
||||
%define MAGIC 0x1BADB002
|
||||
@@ -43,6 +44,8 @@ section .data
|
||||
dd 0
|
||||
multiboot_magic:
|
||||
dd 0
|
||||
kernel_page_count:
|
||||
dd 0
|
||||
|
||||
section .text
|
||||
bits 32
|
||||
@@ -98,10 +101,18 @@ section .text
|
||||
mov eax, pd
|
||||
or eax, 0x03
|
||||
mov [pdpt], eax
|
||||
; Map first 32 2mb pages for the kernel for a total of 64mb
|
||||
|
||||
; Calculate how many 2mb pages we need to identity map
|
||||
mov ecx, kernel_end
|
||||
add ecx, 0x1FFFFF ; Page align end of kernel
|
||||
shr ecx, 21 ; ecx now holds the required pages
|
||||
|
||||
; Save the page count so we can pass it to c later
|
||||
mov [kernel_page_count], ecx
|
||||
|
||||
; Identity map the 0x0 to kernel_end
|
||||
mov edi, pd
|
||||
mov eax, 0x83
|
||||
mov ecx, 32
|
||||
.setup_pd:
|
||||
mov [edi], eax
|
||||
add eax, 0x200000
|
||||
@@ -150,6 +161,7 @@ section .text
|
||||
; Finally, we call in to c
|
||||
mov edi, [multiboot_magic]
|
||||
mov esi, [multiboot_info]
|
||||
mov edx, [kernel_page_count]
|
||||
call x86_64_main
|
||||
.hang:
|
||||
hlt
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
#include "panic.h"
|
||||
#include "util.h"
|
||||
|
||||
void x86_64_main(u32 magic, multiboot_info_t* info)
|
||||
void x86_64_main(u32 magic, multiboot_info_t* info, u32 kernel_page_count)
|
||||
{
|
||||
console_clear();
|
||||
|
||||
@@ -23,10 +23,9 @@ void x86_64_main(u32 magic, multiboot_info_t* info)
|
||||
|
||||
idt_init();
|
||||
remap_pic();
|
||||
enable_interrupts();
|
||||
|
||||
pmm_init(info);
|
||||
vmm_init();
|
||||
pmm_init(kernel_page_count, info);
|
||||
vmm_init(kernel_page_count);
|
||||
|
||||
kernel_main();
|
||||
}
|
||||
|
||||
@@ -22,9 +22,8 @@ static size_t num_regions = 0;
|
||||
#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8)
|
||||
|
||||
static u8 page_bitmap[BITMAP_SIZE];
|
||||
static u64 total_pages = 0;
|
||||
|
||||
void pmm_init(multiboot_info_t* info)
|
||||
void pmm_init(u32 kernel_page_count, multiboot_info_t* info)
|
||||
{
|
||||
if (!(info->flags & (1 << 6)))
|
||||
{
|
||||
@@ -68,10 +67,9 @@ void pmm_init(multiboot_info_t* info)
|
||||
|
||||
for (u64 page = start_page; page < start_page + num_pages; page++)
|
||||
{
|
||||
if (page < BITMAP_SIZE * 8)
|
||||
if (page < BITMAP_PAGE_COUNT)
|
||||
{
|
||||
page_bitmap[page / 8] &= ~(1 << (page % 8));
|
||||
total_pages++;
|
||||
}
|
||||
else
|
||||
{
|
||||
@@ -81,22 +79,16 @@ void pmm_init(multiboot_info_t* info)
|
||||
}
|
||||
}
|
||||
|
||||
// Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader
|
||||
// todo(nub31): This should be revisited. Maybe do a higher half kernel?
|
||||
for (u64 page = 0; page < 32; page++)
|
||||
// The kernel was identity mapped by the bootloader, so those pages should be marked as unavailable
|
||||
for (u64 page = 0; page < kernel_page_count; page++)
|
||||
{
|
||||
if (page < BITMAP_PAGE_COUNT)
|
||||
{
|
||||
if (!(page_bitmap[page / 8] & (1 << (page % 8))))
|
||||
if (page >= BITMAP_PAGE_COUNT)
|
||||
{
|
||||
panic("Bitmap is not large enough to hold the memory reserved by the kernel");
|
||||
}
|
||||
|
||||
page_bitmap[page / 8] |= (1 << (page % 8));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
panic("Bitmap is not large enough to hold the bootloader reserved memory");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
u64 pmm_alloc()
|
||||
|
||||
@@ -10,10 +10,10 @@
|
||||
// The value must be a multiple of 8
|
||||
#define MAX_MEMORY GiB(64)
|
||||
|
||||
void pmm_init(multiboot_info_t* info);
|
||||
void pmm_init(u32 kernel_page_count, multiboot_info_t* info);
|
||||
|
||||
// Low level function to allocate a 2mb physical page and return the physical address
|
||||
// Return value 0 indicates out of memory
|
||||
// A return value 0 indicates out of memory
|
||||
u64 pmm_alloc();
|
||||
|
||||
// Low level function to free a 2mb physical page
|
||||
|
||||
@@ -17,67 +17,91 @@
|
||||
#define BITMAP_SIZE (BITMAP_PAGE_COUNT / 8)
|
||||
|
||||
static u8 page_bitmap[BITMAP_SIZE];
|
||||
static u64 total_pages = 0;
|
||||
static u64 free_pages = 0;
|
||||
|
||||
extern u64 pml4[];
|
||||
|
||||
void vmm_init()
|
||||
void vmm_init(u32 kernel_page_count)
|
||||
{
|
||||
// Mark first 32 pages (64mb) as unusable since it is reserved by the bootloader
|
||||
// todo(nub31): This should be revisited. Maybe do a higher half kernel?
|
||||
for (u64 page = 0; page < 32; page++)
|
||||
// The kernel was identity mapped when we entered long mode,
|
||||
// so those virtual addresses should be marked as unavailable
|
||||
for (u64 page = 0; page < kernel_page_count; page++)
|
||||
{
|
||||
if (page < BITMAP_PAGE_COUNT)
|
||||
{
|
||||
if (!(page_bitmap[page / 8] & (1 << (page % 8))))
|
||||
if (page >= BITMAP_PAGE_COUNT)
|
||||
{
|
||||
panic("Bitmap is not large enough to hold the addresses reserved by the kernel");
|
||||
}
|
||||
|
||||
page_bitmap[page / 8] |= (1 << (page % 8));
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
panic("Bitmap is not large enough to hold the bootloader reserved memory");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
u64 vmm_alloc_address(size_t page_count)
|
||||
{
|
||||
for (size_t i = 0; i < BITMAP_SIZE; i++)
|
||||
size_t total_pages = BITMAP_PAGE_COUNT;
|
||||
|
||||
for (size_t start_page = 0; start_page <= total_pages - page_count; start_page++)
|
||||
{
|
||||
if (page_bitmap[i] != 0xFF)
|
||||
bool found_block = true;
|
||||
|
||||
for (size_t i = 0; i < page_count; i++)
|
||||
{
|
||||
for (int bit = 0; bit < 8; bit++)
|
||||
size_t page = start_page + i;
|
||||
size_t byte_index = page / 8;
|
||||
size_t bit_index = page % 8;
|
||||
|
||||
if (page_bitmap[byte_index] & (1 << bit_index))
|
||||
{
|
||||
if (!(page_bitmap[i] & (1 << bit)))
|
||||
{
|
||||
page_bitmap[i] |= (1 << bit);
|
||||
free_pages--;
|
||||
return ((i * 8 + bit) * PAGE_SIZE);
|
||||
found_block = false;
|
||||
start_page = page;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (found_block)
|
||||
{
|
||||
for (size_t i = 0; i < page_count; i++)
|
||||
{
|
||||
size_t page = start_page + i;
|
||||
size_t byte_index = page / 8;
|
||||
size_t bit_index = page % 8;
|
||||
page_bitmap[byte_index] |= (1 << bit_index);
|
||||
}
|
||||
|
||||
return start_page * PAGE_SIZE;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
u64 vmm_free_address(u64 virtual_address, size_t page_count)
|
||||
void vmm_free_address(u64 virtual_address, size_t page_count)
|
||||
{
|
||||
u64 page = virtual_address / PAGE_SIZE;
|
||||
if (page < BITMAP_SIZE * 8)
|
||||
u64 start_page = virtual_address / PAGE_SIZE;
|
||||
if (start_page + page_count > BITMAP_PAGE_COUNT)
|
||||
{
|
||||
if (page_bitmap[page / 8] & (1 << (page % 8)))
|
||||
{
|
||||
page_bitmap[page / 8] &= ~(1 << (page % 8));
|
||||
free_pages++;
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Virtual address %x is already free", virtual_address);
|
||||
printf("Virtual address range exceeds bitmap bounds\n");
|
||||
panic("Failed to free virtual address");
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < page_count; i++)
|
||||
{
|
||||
size_t page = start_page + i;
|
||||
size_t byte_index = page / 8;
|
||||
size_t bit_index = page % 8;
|
||||
|
||||
if (!(page_bitmap[byte_index] & (1 << bit_index)))
|
||||
{
|
||||
printf("Virtual address 0x%x (page %u) is already free\n", virtual_address + (i * PAGE_SIZE), page);
|
||||
panic("Failed to free virtual address");
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < page_count; i++)
|
||||
{
|
||||
size_t page = start_page + i;
|
||||
size_t byte_index = page / 8;
|
||||
size_t bit_index = page % 8;
|
||||
page_bitmap[byte_index] &= ~(1 << bit_index);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,15 +2,19 @@
|
||||
|
||||
#include "std.h"
|
||||
|
||||
// Defines the theoretical max virtual memory space the kernel can allocate
|
||||
// The value must be a multiple of 8
|
||||
#define ADDRES_SPACE_SIZE GiB(64)
|
||||
|
||||
void vmm_init();
|
||||
void vmm_init(u32 kernel_page_count);
|
||||
|
||||
// Allocates a free page aligned block of virtual addresses
|
||||
// A return value 0 indicates that there were no blocks
|
||||
// found which are large enough for the number of pages requested
|
||||
u64 vmm_alloc_address(size_t page_count);
|
||||
// Frees a block of virtual addresses previously allocated via `vmm_alloc_address`
|
||||
// Only use this function for pages mapped via `vmm_alloc_address`
|
||||
u64 vmm_free_address(u64 virtual_address, size_t page_count);
|
||||
void vmm_free_address(u64 virtual_address, size_t page_count);
|
||||
|
||||
// Low level function to map a virtual address to a physical address
|
||||
void vmm_map(u64 physical_address, u64 virtual_address, u32 flags);
|
||||
|
||||
@@ -4,8 +4,8 @@
|
||||
|
||||
void kernel_main()
|
||||
{
|
||||
arch_api.enable_interrupts();
|
||||
printf("Welcome to nub OS :)\n");
|
||||
|
||||
printf("Kernel has exited\n");
|
||||
arch_api.halt();
|
||||
}
|
||||
Reference in New Issue
Block a user