convert gc to c

nub31
2025-05-03 16:32:51 +02:00
parent aa970f1abb
commit bfc3aad516
11 changed files with 306 additions and 488 deletions

.clang-format Normal file (+3)

@@ -0,0 +1,3 @@
BasedOnStyle: Chromium
IndentWidth: 4
ColumnLimit: 120

Nub.Core/Nub.Core.csproj Normal file

@@ -0,0 +1,9 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>net8.0</TargetFramework>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
</Project>


@@ -2,6 +2,8 @@
Microsoft Visual Studio Solution File, Format Version 12.00
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Nub.Lang", "Nub.Lang\Nub.Lang.csproj", "{5047E21F-590D-4CB3-AFF3-064316485009}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Nub.Core", "Nub.Core\Nub.Core.csproj", "{903F2D49-4F69-4287-A709-EFC68BDD9654}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -12,5 +14,9 @@ Global
{5047E21F-590D-4CB3-AFF3-064316485009}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5047E21F-590D-4CB3-AFF3-064316485009}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5047E21F-590D-4CB3-AFF3-064316485009}.Release|Any CPU.Build.0 = Release|Any CPU
+{903F2D49-4F69-4287-A709-EFC68BDD9654}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{903F2D49-4F69-4287-A709-EFC68BDD9654}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{903F2D49-4F69-4287-A709-EFC68BDD9654}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{903F2D49-4F69-4287-A709-EFC68BDD9654}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
EndGlobal

Nub.Lang/Nub.Lang.csproj

@@ -8,7 +8,7 @@
</PropertyGroup>
<ItemGroup>
-<PackageReference Include="Nub.Core" Version="1.0.1" />
+<ProjectReference Include="..\Nub.Core\Nub.Core.csproj" />
</ItemGroup>
</Project>

input/baseline/alloc.asm (deleted)

@@ -1,160 +0,0 @@
global alloc, free
section .bss
free_list_head: resq 1 ; head of free list
alloc_list_head: resq 1 ; head of allocation list
section .text
alloc:
add rdi, 16 ; reserve 16 bytes for metadata
mov rsi, [free_list_head]
xor r8, r8
.loop:
test rsi, rsi ; allocate new block if end of list is reached
jz .new_block
mov rdx, [rsi]
cmp rdx, rdi ; is there enough space for allocation?
ja .use_block ; yes? allocate
add rdx, 16
cmp rdx, rdi ; no? is there enough space if we include metadata
je .use_block ; if we include metadata, the sizes have to match exactly, or partial metadata will persist
mov r8, rsi ; r8 contains the node from the last iteration
mov rsi, [rsi + 8] ; next node
jmp .loop
.new_block:
push rdi
push r8
add rdi, 16
mov rsi, 4096
call max
push rax
mov rdi, rax
call sys_mmap
pop rsi
sub rsi, 16
mov qword [rax], rsi ; update metadata to page size - metadata
push rax
mov rdi, rax
call insert_into_free
pop rsi
pop r8
pop rdi
.use_block:
cmp [rsi], rdi ; check if the block will be empty after allocation
ja .unlink_done ; if not, do not unlink
test r8, r8 ; r8 is null if node is also head
jz .unlink_head
mov rdx, [rsi + 8] ; load next node
mov [r8 + 8], rdx ; link next node to last node's next
jmp .unlink_done
.unlink_head:
mov rdx, [free_list_head] ; load head
mov rdx, [rdx + 8] ; load head.next
mov [free_list_head], rdx ; mov head.next into head
.unlink_done:
sub [rsi], rdi ; reduce available space of block by the allocated space
mov rdx, [rsi] ; load the available space excluding the newly allocated space
lea rax, [rsi + rdx + 16] ; load the address of the newly allocated space
sub rdi, 16
mov [rax], rdi ; update metadata to allocation size - metadata
mov rdx, [alloc_list_head]
mov [rax + 8], rdx ; set this allocation's next to the current head
mov [alloc_list_head], rax ; update head to point to this node
lea rax, [rax + 16] ; skip past metadata for return value
ret
free:
lea rdi, [rdi - 16] ; adjust for metadata
mov rsi, [alloc_list_head]
xor r8, r8
.loop:
test rsi, rsi
jz .not_found
cmp rdi, rsi
je .found
mov r8, rsi
mov rsi, [rsi + 8] ; next node
jmp .loop
.not_found:
mov rax, 60
mov rdi, 1
syscall
.found:
test r8, r8 ; r8 is null if node is also head
jz .unlink_head
mov rdx, [rsi + 8] ; load next node
mov [r8 + 8], rdx ; link next node to last node's next
jmp .unlink_done
.unlink_head:
mov rdx, [alloc_list_head] ; load head
mov rdx, [rdx + 8] ; load head.next
mov [alloc_list_head], rdx ; mov head.next into head
.unlink_done:
mov rdi, rsi
call insert_into_free
ret
insert_into_free:
mov rsi, [free_list_head] ; load head
test rsi, rsi ; is list empty
jz .insert_head ; if empty, insert at head
cmp rdi, rsi ; is input smaller than head
jl .insert_head ; if smaller, insert at head
xor r8, r8 ; r8 will track the previous node
.loop:
test rsi, rsi
jz .insert_end ; if at end of list, insert at end
cmp rdi, [rsi + 8] ; compare input to next node
jg .next ; if larger, skip to next node
mov [rsi + 8], rdi ; if smaller, insert at this position
mov [rdi + 8], rdx
ret
.next:
mov r8, rsi ; update r8 to current node
mov rsi, [rsi + 8] ; update rsi to next node
jmp .loop
.insert_head:
mov rdx, [free_list_head]
mov [rdi + 8], rdx
mov [free_list_head], rdi
ret
.insert_end:
mov [r8 + 8], rdi ; update last node's next to point at rdi
ret
sys_mmap:
mov rax, 9
mov rsi, rdi
mov rdi, 0
mov rdx, 3
mov r10, 34
mov r8, -1
mov r9, 0
syscall
cmp rax, -1
je .error
ret
.error:
mov rax, 60
mov rdi, 1
syscall
sys_munmap:
mov rax, 11
syscall
cmp rax, -1
je .error
ret
.error:
mov rax, 60
mov rdi, 1
syscall
max:
cmp rdi, rsi
jae .left
mov rax, rsi
ret
.left:
mov rax, rdi
ret

input/baseline/gc.asm (deleted)

@@ -1,311 +0,0 @@
global gc_init, gc_alloc
extern printint, printstr, endl
section .bss
alloc_list_head: resq 1 ; metadata size: 24
free_list_head: resq 1 ; metadata size: 16
stack_start: resq 1
free_list_size: resq 1
mark_count: resq 1
section .data
gc_bytes_allocated: dq 0 ; bytes allocated since the last gc cycle
gc_trigger_threshold: dq 1024 * 1024 * 8 ; initial gc trigger threshold in bytes (adjusts dynamically)
txt_start_collect: db "Running gc after ", 0
txt_sweep_done: db " Sweep done. We now have ", 0
txt_next_threshold: db " The next threshold is ", 0
txt_allocated_bytes: db " allocated bytes", 0
txt_marking_done: db " Marking done. Objects marked is ", 0
txt_free_list_size: db " Free list size is ", 0
section .text
gc_init:
mov [stack_start], rsp
ret
gc_alloc:
add rdi, 24 ; adjust for metadata size
mov rdx, [gc_bytes_allocated]
cmp rdx, [gc_trigger_threshold]
jbe .alloc ; skip the collect unless bytes allocated since the last collect exceed the threshold
push rdi
call gc_collect
pop rdi
.alloc:
add [gc_bytes_allocated], rdi ; adjust allocated bytes counter
mov rsi, [free_list_head]
xor r8, r8
.loop:
test rsi, rsi
jz .new_block ; allocate new block if at end of list
mov rdx, [rsi]
cmp rdi, rdx
jbe .use_block ; use block if object fits within block
mov r8, rsi ; load r8 with current node
mov rsi, [rsi + 8] ; load next node
jmp .loop
.new_block:
push rdi
push r8
mov rsi, 4096
call max ; calculate size of allocation (max(input, 4096))
mov rdi, rax
push rdi
call sys_mmap
pop rsi
sub rsi, 16
mov qword [rax], rsi ; set size of object to page size - metadata
mov qword [rax + 8], 0 ; ensure that next pointer is null
push rax
mov rdi, rax
call insert_into_free
pop rsi
pop r8
pop rdi
.use_block:
cmp [rsi], rdi ; check if the block will be empty after allocation
ja .unlink_done ; if not, do not unlink
test r8, r8 ; r8 is null if node is also head
jz .unlink_head
mov rdx, [rsi + 8] ; load next node
mov [r8 + 8], rdx ; link next node to last node's next
jmp .unlink_done
dec qword [free_list_size]
.unlink_head:
mov rdx, [free_list_head] ; load head
mov rdx, [rdx + 8] ; load head.next
mov [free_list_head], rdx ; mov head.next into head
dec qword [free_list_size]
.unlink_done:
sub [rsi], rdi ; reduce available space of block by the allocated space
mov rdx, [rsi] ; load the available space excluding the newly allocated space
lea rax, [rsi + rdx + 16] ; load the address of the newly allocated space
mov byte [rax], 0 ; update mark to 0
sub rdi, 24
mov [rax + 8], rdi ; update metadata size to allocation size - metadata
mov rdx, [alloc_list_head]
mov [rax + 16], rdx
mov [alloc_list_head], rax ; append this allocation to the head of allocation list
lea rax, [rax + 24] ; skip past metadata for return value
ret
gc_collect:
mov rdi, txt_start_collect
call printstr
mov rdi, [gc_bytes_allocated]
call printint
mov rdi, txt_allocated_bytes
call printstr
call endl
call gc_mark_stack
call gc_sweep
mov rdi, txt_sweep_done
call printstr
mov rdi, [gc_bytes_allocated]
call printint
mov rdi, txt_allocated_bytes
call printstr
call endl
mov rdi, [gc_bytes_allocated]
shl rdi, 1
mov rsi, 1024 * 1024 * 8
call max
mov [gc_trigger_threshold], rax
mov qword [gc_bytes_allocated], 0
mov rdi, txt_next_threshold
call printstr
mov rdi, [gc_trigger_threshold]
call printint
mov rdi, txt_allocated_bytes
call printstr
call endl
mov rdi, txt_free_list_size
call printstr
mov rdi, [free_list_size]
call printint
call endl
ret
gc_mark_stack:
mov qword [mark_count], 0
mov r8, rsp ; load current stack pointer
mov r9, [stack_start] ; load start of stack
.loop:
cmp r8, r9 ; have we reached end of stack?
jae .done ; yes? return
mov rdi, [r8] ; no? load the value
call gc_mark ; this might be an allocation, check
lea r8, [r8 + 8] ; next item in stack
jmp .loop
.done:
mov rdi, txt_marking_done
call printstr
mov rdi, [mark_count]
call printint
call endl
ret
gc_mark:
test rdi, rdi
jz .done ; if stack item is null, return
mov rsi, [alloc_list_head]
.loop:
test rsi, rsi
jz .done ; return if end of list is reached
lea rdx, [rsi + 24] ; input value does not include metadata
cmp rdx, rdi
je .mark_object ; if match is found, mark the object
mov rsi, [rsi + 16] ; load next item and repeat
jmp .loop
.mark_object:
inc qword [mark_count]
mov al, [rdi] ; load mark
test al, al ; already marked?
jnz .done ; yes? return
mov byte [rdi - 24], 1 ; mark object
mov rcx, [rdi - 16] ; load object size
lea rcx, [rdi + rcx] ; end of object
.scan_object:
cmp rdi, rcx
jge .done
push rdi
mov rdi, [rdi]
call gc_mark
pop rdi
lea rdi, [rdi + 8]
jmp .scan_object
.done:
ret
gc_sweep:
mov rdi, [alloc_list_head]
xor r8, r8
.loop:
test rdi, rdi ; reached end of list?
jz .done ; yes? return
mov al, [rdi]
test al, al
jz .unmarked ; if unmarked, free object
mov byte [rdi], 0 ; unmark object
mov r8, rdi
mov rdi, [rdi + 16] ; next item
jmp .loop
.done:
ret
.unmarked:
mov r9, [rdi + 16] ; save address of next object in list
test r8, r8
jz .unlink_head ; if current is head, unlink head
mov [r8 + 16], r9 ; unlink the current node by setting the previous node's next to the next node's address
jmp .unlink_done
.unlink_head:
mov [alloc_list_head], r9 ; update head node to be the next node
.unlink_done:
push r8 ; save previous node since it will also be the previous node for the next item
push r9 ; save next node
mov rdx, [rdi + 8] ; load current size
add rdx, 24 ; add metadata size back
sub [gc_bytes_allocated], rdx ; adjust allocated bytes
mov rdx, [rdi + 8] ; load current size
add rdx, 8 ; adjust for smaller metadata in free list
mov [rdi], rdx ; save new size in correct position
mov qword [rdi + 8], 0 ; set next to null
call insert_into_free
pop rdi ; input for next iteration
pop r8 ; prev node for next iteration
jmp .loop
insert_into_free:
mov rsi, [free_list_head] ; rsi will track the current node
test rsi, rsi
jz .insert_head ; if list is empty, insert at head
cmp rdi, rsi
jb .insert_head ; if input is smaller than head, insert at head
.loop:
mov r9, [rsi + 8] ; load next node
test r9, r9
jz .insert_tail ; if at end of the list, insert at tail
cmp rdi, r9
jbe .insert_between ; if input <= next, insert between current and next
mov rsi, r9
jmp .loop
.insert_between:
mov [rdi + 8], r9 ; input.next = next
mov [rsi + 8], rdi ; current.next = input
inc qword [free_list_size]
mov rdi, rsi
call merge
ret
.insert_head:
mov [rdi + 8], rsi
mov [free_list_head], rdi
inc qword [free_list_size]
call merge
ret
.insert_tail:
mov qword [rdi + 8], 0 ; set input.next to null
mov [rsi + 8], rdi ; add input to current.next
inc qword [free_list_size]
mov rdi, rsi
call merge
ret
merge:
mov rsi, [rdi + 8]
test rsi, rsi
jz .return
mov rdx, [rdi]
lea rdx, [rdi + rdx + 16]
cmp rdx, rsi
jne .return
dec qword [free_list_size]
mov rdx, [rsi]
add rdx, 16
add [rdi], rdx
mov rdx, [rsi + 8]
mov [rdi + 8], rdx
jmp merge
.return:
ret
sys_mmap:
mov rax, 9
mov rsi, rdi
mov rdi, 0
mov rdx, 3
mov r10, 34
mov r8, -1
mov r9, 0
syscall
cmp rax, -1
je .error
ret
.error:
mov rax, 60
mov rdi, 1
syscall
sys_munmap:
mov rax, 11
syscall
cmp rax, -1
je .error
ret
.error:
mov rax, 60
mov rdi, 1
syscall
max:
cmp rdi, rsi
jae .left
mov rax, rsi
ret
.left:
mov rax, rdi
ret
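
The magic numbers in the assembly above — a 24-byte allocation header (mark byte at offset 0, size at +8, next pointer at +16) and a 16-byte free-block header (size at 0, next at +8) — map directly onto the structs in the new gc.c that follows. As a sketch only, the layout could be pinned down with compile-time asserts; the asserts and the mirror structs below are illustrative and not part of this commit:

#include <stdint.h>

/* Mirrors of the headers defined in gc.c below. */
typedef struct alloc_block {
    uint8_t mark;             /* offset 0  -- the byte written by `mov byte [rax], 0` */
    uint8_t padding[7];
    int64_t size;             /* offset 8  -- `mov [rax + 8], rdi`  */
    struct alloc_block* next; /* offset 16 -- `mov [rax + 16], rdx` */
} alloc_block_t;

typedef struct free_block {
    int64_t size;             /* offset 0 -- `mov qword [rax], rsi`     */
    struct free_block* next;  /* offset 8 -- `mov qword [rax + 8], 0`   */
} free_block_t;

/* gc.asm reserves 24 and 16 bytes of metadata respectively. */
_Static_assert(sizeof(alloc_block_t) == 24, "alloc header should stay 24 bytes");
_Static_assert(sizeof(free_block_t) == 16, "free header should stay 16 bytes");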

input/baseline/gc.c Normal file (+277)

@@ -0,0 +1,277 @@
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
/* Constants */
#define GC_INITIAL_THRESHOLD (1024 * 1024 * 8) // 8MB initial threshold
#define GC_MIN_ALLOC 4096 // Minimum allocation size
/* Allocation metadata structures */
typedef struct alloc_block {
uint8_t mark; // Mark bit for GC
uint8_t padding[7]; // Padding for alignment
int64_t size; // Size of the allocation
struct alloc_block* next; // Next allocation in the list
} alloc_block_t;
typedef struct free_block {
int64_t size; // Size of the free block
struct free_block* next; // Next free block in the list
} free_block_t;
/* Global variables */
static alloc_block_t* alloc_list_head = NULL;
static free_block_t* free_list_head = NULL;
static void* stack_start = NULL;
static int64_t free_list_size = 0;
static int64_t mark_count = 0;
/* GC metrics */
static int64_t gc_bytes_allocated = 0;
static int64_t gc_trigger_threshold = GC_INITIAL_THRESHOLD;
/* Forward declarations */
static void* sys_mmap(size_t size);
static void gc_collect(void);
static void gc_mark(void* ptr);
static void gc_mark_stack(void);
static void gc_sweep(void);
static int64_t max(int64_t a, int64_t b);
static void insert_into_free(free_block_t* block);
static void merge(free_block_t* block);
/* Initialize the garbage collector */
void gc_init(void) {
// Save the current stack pointer as the start of the stack
volatile unsigned long var = 0;
stack_start = (void*)((unsigned long)&var + 4);
}
/* Allocate memory with garbage collection */
void* gc_alloc(int64_t size) {
size += sizeof(alloc_block_t); // Adjust for metadata size
// Check if we need to trigger garbage collection
if (gc_bytes_allocated > gc_trigger_threshold) {
gc_collect();
}
gc_bytes_allocated += size; // Adjust allocation counter
// Search free list for a suitable block
free_block_t* current = free_list_head;
free_block_t* prev = NULL;
while (current != NULL) {
if (current->size >= size) {
// Found a suitable block
break;
}
prev = current;
current = current->next;
}
if (current == NULL) {
// No suitable block found, allocate a new one
int64_t alloc_size = max(size, GC_MIN_ALLOC);
void* memory = sys_mmap(alloc_size);
free_block_t* new_block = (free_block_t*)memory;
new_block->size = alloc_size - sizeof(free_block_t);
new_block->next = NULL;
insert_into_free(new_block);
current = new_block;
// Recalculate prev
if (current == free_list_head) {
prev = NULL;
} else {
prev = free_list_head;
while (prev->next != current) {
prev = prev->next;
}
}
}
// Use the block
alloc_block_t* result;
if (current->size > size) {
// Block is larger than needed, split it
result = (alloc_block_t*)((char*)current + current->size + sizeof(free_block_t) - size);
current->size -= size;
} else {
// Use the entire block
result = (alloc_block_t*)current;
// Remove block from free list
if (prev == NULL) {
free_list_head = current->next;
} else {
prev->next = current->next;
}
free_list_size--;
}
// Initialize metadata
result->mark = 0;
result->size = size - sizeof(alloc_block_t);
result->next = alloc_list_head;
alloc_list_head = result;
// Return pointer to usable memory
return (void*)(result + 1);
}
/* Run garbage collection */
static void gc_collect(void) {
printf("Reached threshold of %ld bytes. Starting GC\n", gc_bytes_allocated);
gc_mark_stack();
printf("\tMarking done. Objects marked is %ld\n", mark_count);
gc_sweep();
printf("\tSweep done. We now have %ld allocated bytes\n", gc_bytes_allocated);
gc_trigger_threshold = max(gc_bytes_allocated * 2, GC_INITIAL_THRESHOLD);
gc_bytes_allocated = 0;
printf("\tThe next threshold is %ld allocated bytes\n", gc_trigger_threshold);
printf("\tFree list size is %ld\n", free_list_size);
}
/* Mark phase of GC - scan stack for pointers */
static void gc_mark_stack(void) {
mark_count = 0;
void** current = (void**)&current; // Approximate current stack position
void** end = (void**)stack_start;
while (current < end) {
gc_mark(*current);
current++;
}
}
/* Mark a single object and recursively mark its contents */
static void gc_mark(void* ptr) {
if (ptr == NULL)
return;
// Check if ptr points to a valid allocation
alloc_block_t* block = alloc_list_head;
while (block != NULL) {
void* block_data = (void*)(block + 1);
if (block_data == ptr) {
// Found the block, mark it if not already marked
if (block->mark == 0) {
mark_count++;
block->mark = 1;
// Recursively mark all pointers in the object
void** p = (void**)block_data;
void** end = (void**)((char*)block_data + block->size);
while (p < end) {
gc_mark(*p);
p++;
}
}
return;
}
block = block->next;
}
}
/* Sweep phase of GC - free unmarked objects */
static void gc_sweep(void) {
alloc_block_t* current = alloc_list_head;
alloc_block_t* prev = NULL;
while (current != NULL) {
if (current->mark == 0) {
// Unmarked object, remove it from the allocation list
alloc_block_t* next = current->next;
if (prev == NULL) {
alloc_list_head = next;
} else {
prev->next = next;
}
// Adjust allocated bytes counter
gc_bytes_allocated -= (current->size + sizeof(alloc_block_t));
// Add to free list
free_block_t* free_block = (free_block_t*)current;
free_block->size = current->size + sizeof(alloc_block_t) - sizeof(free_block_t);
free_block->next = NULL;
insert_into_free(free_block);
current = next;
} else {
// Marked object, unmark it for next GC cycle
current->mark = 0;
prev = current;
current = current->next;
}
}
}
/* Insert a block into the free list, maintaining address order */
static void insert_into_free(free_block_t* block) {
if (free_list_head == NULL || block < free_list_head) {
// Insert at head
block->next = free_list_head;
free_list_head = block;
free_list_size++;
merge(block);
return;
}
// Find insertion point
free_block_t* current = free_list_head;
while (current->next != NULL && current->next < block) {
current = current->next;
}
// Insert after current
block->next = current->next;
current->next = block;
free_list_size++;
// Try to merge adjacent blocks
merge(current);
}
/* Merge a block with any adjacent blocks */
static void merge(free_block_t* block) {
while (block->next != NULL) {
char* block_end = (char*)block + block->size + sizeof(free_block_t);
if (block_end == (char*)block->next) {
// Blocks are adjacent, merge them
free_list_size--;
block->size += block->next->size + sizeof(free_block_t);
block->next = block->next->next;
} else {
// No more adjacent blocks
break;
}
}
}
/* Helper to map memory from the system */
static void* sys_mmap(size_t size) {
void* result = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (result == MAP_FAILED) {
_exit(1); // Exit on failure
}
return result;
}
/* Return maximum of two values */
static int64_t max(int64_t a, int64_t b) {
return (a > b) ? a : b;
}
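
For reference, a minimal standalone driver for the new collector — a sketch only, assuming the file is compiled and linked against gc.c as in the updated build.sh below. The prototypes, the node type, and the loop are illustrative and not part of this commit; in the real pipeline the generated out.asm presumably calls gc_init and gc_alloc directly.

#include <stdint.h>
#include <stdio.h>

/* Prototypes for the two entry points defined in gc.c (no header ships with this commit). */
void gc_init(void);
void* gc_alloc(int64_t size);

/* Hypothetical node type, used only to exercise the allocator. */
typedef struct node {
    int64_t value;
    struct node* next;
} node_t;

int main(void) {
    gc_init(); /* record the stack start; in the real pipeline this presumably runs at program entry */

    int64_t checksum = 0;
    for (int64_t i = 0; i < 1000000; i++) {
        node_t* n = gc_alloc(sizeof(node_t)); /* 16 bytes of payload + 24-byte header */
        n->value = i;
        n->next = NULL;
        checksum += n->value; /* each node is used immediately and then becomes garbage */
    }
    /* gc_collect prints its own statistics every time gc_trigger_threshold is crossed,
       so simply running this loop shows collections happening. Anything that must
       survive a collection has to stay visible on the part of the stack that
       gc_mark_stack scans (between the current stack pointer and the recorded start). */
    printf("checksum = %ld\n", (long)checksum);
    return 0;
}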


@@ -1,6 +0,0 @@
namespace DefaultNamespace;
public class gc_h
{
}


@@ -1,19 +1,11 @@
#!/bin/sh
-# baseline
-nasm -g -felf64 ../input/baseline/gc.asm -o gc.o
-nasm -g -felf64 ../input/baseline/alloc.asm -o alloc.o
+gcc -c -g -O2 -fno-stack-protector -fno-builtin ../input/baseline/gc.c -o gc.o
nasm -g -felf64 ../input/baseline/str_cmp.asm -o str_cmp.o
-# core
nasm -g -felf64 ../input/core/str_len.asm -o str_len.o
nasm -g -felf64 ../input/core/arr_size.asm -o arr_size.o
nasm -g -felf64 ../input/core/itoa.asm -o itoa.o
-# program
nasm -g -felf64 out.asm -o out.o
-# tmp
-nasm -g -felf64 ../input/util.asm -o util.o
-ld -o out gc.o alloc.o str_cmp.o str_len.o arr_size.o itoa.o util.o out.o
+gcc -no-pie -nostartfiles -o out gc.o str_cmp.o str_len.o arr_size.o itoa.o out.o

output/clean.sh Executable file (+8)

@@ -0,0 +1,8 @@
#!/bin/sh
rm ./arr_size.o
rm ./gc.o
rm ./itoa.o
rm ./out.o
rm ./str_cmp.o
rm ./str_len.o
rm out