...
This commit is contained in:
115
src/arch/x86_64/mem/vmm.c
Normal file
115
src/arch/x86_64/mem/vmm.c
Normal file
@@ -0,0 +1,115 @@
|
||||
#include "vmm.h"
|
||||
#include "pmm.h"
|
||||
#include "x86_64/panic.h"
|
||||
|
||||
#define PML4_INDEX(addr) (((addr) >> 39) & 0x1FF)
|
||||
#define PDPT_INDEX(addr) (((addr) >> 30) & 0x1FF)
|
||||
#define PD_INDEX(addr) (((addr) >> 21) & 0x1FF)
|
||||
|
||||
#define PTE_MASK 0x000FFFFFFFFFF000ULL
|
||||
|
||||
#define PTE_PRESENT (1ULL << 0)
|
||||
#define PTE_WRITABLE (1ULL << 1)
|
||||
#define PTE_USER (1ULL << 2)
|
||||
#define PTE_PS (1ULL << 7)
|
||||
|
||||
extern u64 pml4[];
|
||||
|
||||
/*
 * Build a page-directory entry mapping `physical_address` as a present,
 * writable large page (PTE_PS is set, so this is consumed at the PD level).
 * Panics if the address is not page aligned.
 *
 * NOTE(review): a PTE_PS entry at the PD level maps a 2 MiB page, which
 * requires 2 MiB alignment — confirm PAGE_SIZE reflects that here, otherwise
 * this check lets through addresses the CPU will treat as misaligned.
 */
static u64 create_pte(u64 physical_address)
{
    if (physical_address & (PAGE_SIZE - 1))
    {
        /* %llx matches the u64 argument (was %x, which is undefined
         * behavior for a 64-bit value and truncates the printed address). */
        printf("Physical address not page aligned (0x%llx)\n", physical_address);
        panic("Failed to create PTE");
    }

    return (physical_address & PTE_MASK) | PTE_PRESENT | PTE_WRITABLE | PTE_PS;
}
|
||||
|
||||
/*
 * Map `virtual_address` to `physical_address` as a large (PTE_PS) page.
 *
 * Walks the existing PML4 -> PDPT -> PD chain and installs the entry at the
 * PD level. Panics if any intermediate table is missing (tables are not yet
 * created dynamically — see the todos) or if the address is already mapped.
 *
 * NOTE(review): table physical addresses are dereferenced directly
 * (`pdpt_phys`, `pd_phys`) — presumably the kernel runs identity-mapped or
 * with a physical-memory window at these addresses; confirm.
 */
void vmm_map(u64 virtual_address, u64 physical_address)
{
    u64 pml4_idx = PML4_INDEX(virtual_address);
    u64 pdpt_idx = PDPT_INDEX(virtual_address);
    u64 pd_idx = PD_INDEX(virtual_address);

    u64 pdpt = pml4[pml4_idx];
    if (!(pdpt & PTE_PRESENT))
    {
        // todo(nub31): Dynamically create a pdpt table
        /* %llu matches the u64 index (was %u — undefined behavior and
         * inconsistent with vmm_unmap, which already uses %llu). */
        printf("PDPT not present at PML4 index %llu\n", pml4_idx);
        panic("Failed to map virtual to physical page");
    }

    u64* pdpt_phys = (u64*)(pdpt & PTE_MASK);
    u64 pd = pdpt_phys[pdpt_idx];
    if (!(pd & PTE_PRESENT))
    {
        // todo(nub31): Dynamically create a pd table
        printf("PD not present at PDPT index %llu\n", pdpt_idx);
        panic("Failed to map virtual to physical page");
    }

    u64* pd_phys = (u64*)(pd & PTE_MASK);
    u64 entry = pd_phys[pd_idx];

    if (entry & PTE_PRESENT)
    {
        /* %llx matches the u64 address (was %x). */
        printf("Virtual address 0x%llx is already mapped\n", virtual_address);
        panic("Failed to map virtual to physical page");
    }

    pd_phys[pd_idx] = create_pte(physical_address);
}
|
||||
|
||||
/*
 * Remove the mapping for `virtual_address` and return the physical address
 * it was mapped to. Panics if any level of the walk (PML4 entry, PDPT entry,
 * or the final PD entry) is not present. Invalidates the TLB entry for the
 * unmapped address before returning.
 */
u64 vmm_unmap(u64 virtual_address)
{
    u64 l4 = PML4_INDEX(virtual_address);
    u64 l3 = PDPT_INDEX(virtual_address);
    u64 l2 = PD_INDEX(virtual_address);

    u64 pml4_entry = pml4[l4];
    if (!(pml4_entry & PTE_PRESENT))
    {
        printf("PDPT not present at PML4 index %llu\n", l4);
        panic("Failed to unmap virtual address");
    }

    u64* pdpt_table = (u64*)(pml4_entry & PTE_MASK);
    u64 pdpt_entry = pdpt_table[l3];
    if (!(pdpt_entry & PTE_PRESENT))
    {
        printf("PD not present at PDPT index %llu\n", l3);
        panic("Failed to unmap virtual address");
    }

    u64* pd_table = (u64*)(pdpt_entry & PTE_MASK);
    u64 mapping = pd_table[l2];
    if (!(mapping & PTE_PRESENT))
    {
        printf("Virtual address 0x%llx is not mapped\n", virtual_address);
        panic("Failed to unmap virtual address");
    }

    /* Clear the entry, then flush the stale TLB entry for this page. */
    u64 phys = mapping & PTE_MASK;
    pd_table[l2] = 0;

    __asm__ volatile("invlpg (%0)" : : "r"(virtual_address) : "memory");

    return phys;
}
|
||||
|
||||
/*
 * Allocate one physical page from the PMM and map it at `virtual_address`.
 * Returns the virtual address as a pointer. Panics when the PMM is out of
 * pages (pmm_alloc returning 0 is treated as allocation failure).
 */
void* vmm_alloc(u64 virtual_address)
{
    u64 frame = pmm_alloc();
    if (!frame)
    {
        panic("Out of physical memory");
    }

    vmm_map(virtual_address, frame);

    return (void*)virtual_address;
}
|
||||
|
||||
/*
 * Unmap `virtual_address` and hand its backing physical page back to the
 * PMM. vmm_unmap panics if the address was not mapped, so the page returned
 * to pmm_free is always a previously mapped frame.
 */
void vmm_free(u64 virtual_address)
{
    pmm_free(vmm_unmap(virtual_address));
}
|
||||
Reference in New Issue
Block a user