init commit of lab3_1

lab3_1_fork
Zhiyuan Shao 2 years ago
parent 5b9d0b55ea
commit 1f568dd6e1

@ -70,7 +70,7 @@ USER_OBJS := $(addprefix $(OBJ_DIR)/, $(patsubst %.c,%.o,$(USER_CPPS)))
USER_TARGET := $(OBJ_DIR)/app_sum_sequence
USER_TARGET := $(OBJ_DIR)/app_naive_fork
#------------------------targets------------------------
$(OBJ_DIR):
@-mkdir -p $(OBJ_DIR)

@ -82,6 +82,26 @@ elf_status elf_load(elf_ctx *ctx) {
// actual loading
if (elf_fpread(ctx, dest, ph_addr.memsz, ph_addr.off) != ph_addr.memsz)
return EL_EIO;
// record the vm region in proc->mapped_info. added @lab3_1
int j;
for( j=0; j<PGSIZE/sizeof(mapped_region); j++ ) //seek the last mapped region
if( (process*)(((elf_info*)(ctx->info))->p)->mapped_info[j].va == 0x0 ) break;
((process*)(((elf_info*)(ctx->info))->p))->mapped_info[j].va = ph_addr.vaddr;
((process*)(((elf_info*)(ctx->info))->p))->mapped_info[j].npages = 1;
// SEGMENT_READABLE, SEGMENT_EXECUTABLE, SEGMENT_WRITABLE are defined in kernel/elf.h
if( ph_addr.flags == (SEGMENT_READABLE|SEGMENT_EXECUTABLE) ){
((process*)(((elf_info*)(ctx->info))->p))->mapped_info[j].seg_type = CODE_SEGMENT;
sprint( "CODE_SEGMENT added at mapped info offset:%d\n", j );
}else if ( ph_addr.flags == (SEGMENT_READABLE|SEGMENT_WRITABLE) ){
((process*)(((elf_info*)(ctx->info))->p))->mapped_info[j].seg_type = DATA_SEGMENT;
sprint( "DATA_SEGMENT added at mapped info offset:%d\n", j );
}else
panic( "unknown program segment encountered, segment flag:%d.\n", ph_addr.flags );
((process*)(((elf_info*)(ctx->info))->p))->total_mapped_region ++;
}
return EL_OK;

@ -25,6 +25,11 @@ typedef struct elf_header_t {
uint16 shstrndx; /* Section header string table index */
} elf_header;
// segment types, attributes of elf_prog_header_t.flags
#define SEGMENT_READABLE 0x4
#define SEGMENT_EXECUTABLE 0x1
#define SEGMENT_WRITABLE 0x2
// Program segment header.
typedef struct elf_prog_header_t {
uint32 type; /* Segment type */

@ -8,12 +8,10 @@
#include "process.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "memlayout.h"
#include "spike_interface/spike_utils.h"
// process is a structure defined in kernel/process.h
process user_app;
//
// trap_sec_start points to the beginning of S-mode trap segment (i.e., the entry point of
// S-mode trap vector). added @lab2_1
@ -35,42 +33,14 @@ void enable_paging() {
// load the elf, and construct a "process" (with only a trapframe).
// load_bincode_from_host_elf is defined in elf.c
//
void load_user_program(process *proc) {
sprint("User application is loading.\n");
// allocate a page to store the trapframe. alloc_page is defined in kernel/pmm.c. added @lab2_1
proc->trapframe = (trapframe *)alloc_page();
memset(proc->trapframe, 0, sizeof(trapframe));
// allocate a page to store page directory. added @lab2_1
proc->pagetable = (pagetable_t)alloc_page();
memset((void *)proc->pagetable, 0, PGSIZE);
process* load_user_program() {
process* proc;
// allocate pages to both user-kernel stack and user app itself. added @lab2_1
proc->kstack = (uint64)alloc_page() + PGSIZE; //user kernel stack top
uint64 user_stack = (uint64)alloc_page(); //phisical address of user stack bottom
// USER_STACK_TOP = 0x7ffff000, defined in kernel/memlayout.h
proc->trapframe->regs.sp = USER_STACK_TOP; //virtual address of user stack top
sprint("user frame 0x%lx, user stack 0x%lx, user kstack 0x%lx \n", proc->trapframe,
proc->trapframe->regs.sp, proc->kstack);
proc = alloc_process();
sprint("User application is loading.\n");
// load_bincode_from_host_elf() is defined in kernel/elf.c
load_bincode_from_host_elf(proc);
// populate the page table of user application. added @lab2_1
// map user stack in userspace, user_vm_map is defined in kernel/vmm.c
user_vm_map((pagetable_t)proc->pagetable, USER_STACK_TOP - PGSIZE, PGSIZE, user_stack,
prot_to_type(PROT_WRITE | PROT_READ, 1));
// map trapframe in user space (direct mapping as in kernel space).
user_vm_map((pagetable_t)proc->pagetable, (uint64)proc->trapframe, PGSIZE, (uint64)proc->trapframe,
prot_to_type(PROT_WRITE | PROT_READ, 0));
// map S-mode trap vector section in user space (direct mapping as in kernel space)
// here, we assume that the size of usertrap.S is smaller than a page.
user_vm_map((pagetable_t)proc->pagetable, (uint64)trap_sec_start, PGSIZE, (uint64)trap_sec_start,
prot_to_type(PROT_READ | PROT_EXEC, 0));
return proc;
}
//
@ -94,12 +64,14 @@ int s_start(void) {
// the code now formally works in paging mode, meaning the page table is now in use.
sprint("kernel page table is on \n");
// the application code (elf) is first loaded into memory, and then put into execution
load_user_program(&user_app);
// added @lab3_1
init_proc_pool();
sprint("Switch to user mode...\n");
// switch_to() is defined in kernel/process.c
switch_to(&user_app);
// the application code (elf) is first loaded into memory, and then put into execution
// added @lab3_1
insert_to_ready_queue( load_user_program() );
schedule();
// we should never reach here.
return 0;

@ -15,18 +15,23 @@
#include "vmm.h"
#include "pmm.h"
#include "memlayout.h"
#include "sched.h"
#include "spike_interface/spike_utils.h"
//Two functions defined in kernel/usertrap.S
extern char smode_trap_vector[];
extern void return_to_user(trapframe *, uint64 satp);
// trap_sec_start points to the beginning of S-mode trap segment (i.e., the entry point
// of S-mode trap vector).
extern char trap_sec_start[];
// process pool. added @lab3_1
process procs[NPROC];
// current points to the currently running user-mode application.
process* current = NULL;
// points to the first free page in our simple heap. added @lab2_2
uint64 g_ufree_page = USER_FREE_ADDRESS_START;
//
// switch to a user-mode process
//
@ -64,3 +69,184 @@ void switch_to(process* proc) {
// note, return_to_user takes two parameters @ and after lab2_1.
return_to_user(proc->trapframe, user_satp);
}
//
// initialize process pool (the procs[] array). added @lab3_1
//
void init_proc_pool() {
  // wipe the whole pool in one shot, then stamp each slot with its
  // fixed pid (slot index) and mark it available for alloc_process().
  memset( procs, 0, sizeof(procs) );
  for (int slot = 0; slot < NPROC; slot++) {
    procs[slot].pid = slot;
    procs[slot].status = FREE;
  }
}
//
// allocate an empty process, init its vm space. returns the pointer to the
// process structure. added @lab3_1
//
process* alloc_process() {
// locate the first usable (status == FREE) slot in the process pool
int i;
for( i=0; i<NPROC; i++ )
if( procs[i].status == FREE ) break;
if( i>=NPROC ){
panic( "cannot find any free process structure.\n" );
return 0;
}
// init proc[i]'s vm space
procs[i].trapframe = (trapframe *)alloc_page(); //trapframe, used to save context
memset(procs[i].trapframe, 0, sizeof(trapframe));
// page directory
procs[i].pagetable = (pagetable_t)alloc_page();
memset((void *)procs[i].pagetable, 0, PGSIZE);
procs[i].kstack = (uint64)alloc_page() + PGSIZE; //user kernel stack top
uint64 user_stack = (uint64)alloc_page(); //physical address of user stack bottom
procs[i].trapframe->regs.sp = USER_STACK_TOP; //virtual address of user stack top
// allocates a page to record memory regions (segments)
procs[i].mapped_info = (mapped_region*)alloc_page();
memset( procs[i].mapped_info, 0, PGSIZE );
// map user stack in userspace. note: the first four mapped_info slots are
// indexed directly by segment type (STACK_SEGMENT == 0, CONTEXT_SEGMENT == 1, ...)
user_vm_map((pagetable_t)procs[i].pagetable, USER_STACK_TOP - PGSIZE, PGSIZE,
user_stack, prot_to_type(PROT_WRITE | PROT_READ, 1));
procs[i].mapped_info[STACK_SEGMENT].va = USER_STACK_TOP - PGSIZE;
procs[i].mapped_info[STACK_SEGMENT].npages = 1;
procs[i].mapped_info[STACK_SEGMENT].seg_type = STACK_SEGMENT;
// map trapframe in user space (direct mapping as in kernel space).
user_vm_map((pagetable_t)procs[i].pagetable, (uint64)procs[i].trapframe, PGSIZE,
(uint64)procs[i].trapframe, prot_to_type(PROT_WRITE | PROT_READ, 0));
procs[i].mapped_info[CONTEXT_SEGMENT].va = (uint64)procs[i].trapframe;
procs[i].mapped_info[CONTEXT_SEGMENT].npages = 1;
procs[i].mapped_info[CONTEXT_SEGMENT].seg_type = CONTEXT_SEGMENT;
// map S-mode trap vector section in user space (direct mapping as in kernel space)
// we assume that the size of usertrap.S is smaller than a page.
user_vm_map((pagetable_t)procs[i].pagetable, (uint64)trap_sec_start, PGSIZE,
(uint64)trap_sec_start, prot_to_type(PROT_READ | PROT_EXEC, 0));
procs[i].mapped_info[SYSTEM_SEGMENT].va = (uint64)trap_sec_start;
procs[i].mapped_info[SYSTEM_SEGMENT].npages = 1;
procs[i].mapped_info[SYSTEM_SEGMENT].seg_type = SYSTEM_SEGMENT;
sprint("in alloc_proc. user frame 0x%lx, user stack 0x%lx, user kstack 0x%lx \n",
procs[i].trapframe, procs[i].trapframe->regs.sp, procs[i].kstack);
// initialize the process's heap manager: the heap starts empty (top == bottom),
// with no reclaimed pages on the free list
procs[i].user_heap.heap_top = USER_FREE_ADDRESS_START;
procs[i].user_heap.heap_bottom = USER_FREE_ADDRESS_START;
procs[i].user_heap.free_pages_count = 0;
// map user heap in userspace
procs[i].mapped_info[HEAP_SEGMENT].va = USER_FREE_ADDRESS_START;
procs[i].mapped_info[HEAP_SEGMENT].npages = 0; // no pages are mapped to heap yet.
procs[i].mapped_info[HEAP_SEGMENT].seg_type = HEAP_SEGMENT;
// four regions so far: stack, context (trapframe), system (trap vector), heap
procs[i].total_mapped_region = 4;
// NOTE(review): status is left FREE here; the caller is expected to make the
// process READY (e.g. via insert_to_ready_queue) before alloc_process runs
// again, otherwise the same slot could be handed out twice — confirm.
// return after initialization.
return &procs[i];
}
//
// reclaim a process. added @lab3_1
//
int free_process( process* proc ) {
// we set the status to ZOMBIE, but cannot destruct its vm space immediately.
// since proc can be current process, and its user kernel stack is currently in use!
// but for proxy kernel, it (memory leaking) may NOT be a really serious issue,
// as it is different from regular OS, which needs to run 7x24.
proc->status = ZOMBIE;
// always reports success; schedule() later treats ZOMBIE slots as finished
// when deciding whether the machine can shut down.
return 0;
}
//
// implements the fork syscall in kernel. added @lab3_1
// basic idea here is to first allocate an empty process (child), then duplicate the
// context and data segments of parent process to the child, and lastly, map other
// segments (code, system) of the parent to child. the stack segment remains unchanged
// for the child.
//
int do_fork( process* parent)
{
  sprint( "will fork a child from parent %d.\n", parent->pid );
  process* child = alloc_process();

  for( int i=0; i<parent->total_mapped_region; i++ ){
    // browse parent's vm space, and copy its trapframe and data segments,
    // map its code segment.
    switch( parent->mapped_info[i].seg_type ){
      case CONTEXT_SEGMENT:
        // duplicate the parent's trapframe so the child resumes from the same point.
        *child->trapframe = *parent->trapframe;
        break;
      case STACK_SEGMENT:
        // deep-copy the user stack page (child received its own stack page in
        // alloc_process, so only the contents need copying).
        memcpy( (void*)lookup_pa(child->pagetable, child->mapped_info[STACK_SEGMENT].va),
          (void*)lookup_pa(parent->pagetable, parent->mapped_info[i].va), PGSIZE );
        break;
      case HEAP_SEGMENT: {
        // braces open a scope: C does not allow a declaration directly after a
        // case label, and free_block_filter must not leak into other cases.
        // build a same heap for child process.
        // convert free_pages_address into a filter to skip reclaimed blocks in the heap
        // when mapping the heap blocks
        int free_block_filter[MAX_HEAP_PAGES];
        // zero the whole array in bytes (was: MAX_HEAP_PAGES, which cleared
        // only 32 bytes of a 128-byte int array, leaving most entries garbage)
        memset(free_block_filter, 0, sizeof(free_block_filter));
        uint64 heap_bottom = parent->user_heap.heap_bottom;
        for (int k = 0; k < parent->user_heap.free_pages_count; k++) {
          int index = (parent->user_heap.free_pages_address[k] - heap_bottom) / PGSIZE;
          free_block_filter[index] = 1;
        }
        // copy and map the heap blocks. use "parent" consistently (the original
        // read current->user_heap; they are the same process at fork time, but
        // the explicit argument is the correct source).
        for (uint64 heap_block = parent->user_heap.heap_bottom;
             heap_block < parent->user_heap.heap_top; heap_block += PGSIZE) {
          if (free_block_filter[(heap_block - heap_bottom) / PGSIZE]) // skip free blocks
            continue;
          void* child_pa = alloc_page();
          memcpy(child_pa, (void*)lookup_pa(parent->pagetable, heap_block), PGSIZE);
          user_vm_map((pagetable_t)child->pagetable, heap_block, PGSIZE, (uint64)child_pa,
            prot_to_type(PROT_WRITE | PROT_READ, 1));
        }
        child->mapped_info[HEAP_SEGMENT].npages = parent->mapped_info[HEAP_SEGMENT].npages;
        // copy the heap manager from parent to child
        memcpy((void*)&child->user_heap, (void*)&parent->user_heap, sizeof(parent->user_heap));
        break;
      }
      case CODE_SEGMENT:
        // TODO (lab3_1): implment the mapping of child code segment to parent's
        // code segment.
        // hint: the virtual address mapping of code segment is tracked in mapped_info
        // page of parent's process structure. use the information in mapped_info to
        // retrieve the virtual to physical mapping of code segment.
        // after having the mapping information, just map the corresponding virtual
        // address region of child to the physical pages that actually store the code
        // segment of parent process.
        // DO NOT COPY THE PHYSICAL PAGES, JUST MAP THEM.
        panic( "You need to implement the code segment mapping of child in lab3_1.\n" );
        // after mapping, register the vm region (do not delete codes below!)
        child->mapped_info[child->total_mapped_region].va = parent->mapped_info[i].va;
        child->mapped_info[child->total_mapped_region].npages =
          parent->mapped_info[i].npages;
        child->mapped_info[child->total_mapped_region].seg_type = CODE_SEGMENT;
        child->total_mapped_region++;
        break;
      default:
        // other segment types (e.g. DATA_SEGMENT, SYSTEM_SEGMENT) need no
        // per-child work here; system/context/stack/heap slots were already
        // prepared by alloc_process.
        break;
    }
  }

  child->status = READY;
  child->trapframe->regs.a0 = 0;  // fork() returns 0 in the child
  child->parent = parent;
  insert_to_ready_queue( child );
  return child->pid;              // parent receives the child's pid
}

@ -18,6 +18,49 @@ typedef struct trapframe_t {
/* offset:272 */ uint64 kernel_satp;
}trapframe;
// riscv-pke kernel supports at most 32 processes
#define NPROC 32
// maximum number of pages in a process's heap
#define MAX_HEAP_PAGES 32
// possible status of a process
enum proc_status {
FREE, // unused state
READY, // ready state
RUNNING, // currently running
BLOCKED, // waiting for something
ZOMBIE, // terminated but not reclaimed yet
};
// types of a segment. note: the first four enumerators (STACK_SEGMENT ..
// HEAP_SEGMENT) double as fixed indices into process.mapped_info[], as set up
// by alloc_process in kernel/process.c.
enum segment_type {
STACK_SEGMENT = 0, // runtime stack segment
CONTEXT_SEGMENT, // trapframe segment
SYSTEM_SEGMENT, // system segment (S-mode trap vector page, trap_sec_start)
HEAP_SEGMENT, // runtime heap segment
CODE_SEGMENT, // ELF segment
DATA_SEGMENT, // ELF segment
};
// the VM regions mapped to a user process
typedef struct mapped_region {
uint64 va; // mapped virtual address
uint32 npages; // mapping_info is unused if npages == 0
uint32 seg_type; // segment type, one of the segment_types
} mapped_region;
typedef struct process_heap_manager {
// points to the last free page in our simple heap.
uint64 heap_top;
// points to the bottom of our simple heap.
uint64 heap_bottom;
// the address of free pages in the heap
uint64 free_pages_address[MAX_HEAP_PAGES];
// the number of free pages in the heap
uint32 free_pages_count;
}process_heap_manager;
// the extremely simple definition of process, used for beginning labs of PKE
typedef struct process_t {
// pointing to the stack used in trap handling.
@ -26,15 +69,38 @@ typedef struct process_t {
pagetable_t pagetable;
// trapframe storing the context of a (User mode) process.
trapframe* trapframe;
// points to a page that contains mapped_regions. below are added @lab3_1
mapped_region *mapped_info;
// next free mapped region in mapped_info
int total_mapped_region;
// heap management
process_heap_manager user_heap;
// process id
uint64 pid;
// process status
int status;
// parent process
struct process_t *parent;
// next queue element
struct process_t *queue_next;
}process;
// switch to run user app
void switch_to(process*);
// initialize process pool (the procs[] array)
void init_proc_pool();
// allocate an empty process, init its vm space. returns a pointer to the process structure
process* alloc_process();
// reclaim a process, destruct its vm space and free physical pages.
int free_process( process* proc );
// fork a child from parent
int do_fork(process* parent);
// current running process
extern process* current;
// address of the first free page in our simple heap. added @lab2_2
extern uint64 g_ufree_page;
#endif

@ -0,0 +1,73 @@
/*
* implementing the scheduler
*/
#include "sched.h"
#include "spike_interface/spike_utils.h"
process* ready_queue_head = NULL;
//
// insert a process, proc, into the END of ready queue.
//
void insert_to_ready_queue( process* proc ) {
  sprint( "going to insert process %d to ready queue.\n", proc->pid );

  // empty queue: proc simply becomes the head.
  if( ready_queue_head == NULL ){
    proc->status = READY;
    proc->queue_next = NULL;
    ready_queue_head = proc;
    return;
  }

  // walk to the tail, bailing out if proc turns out to be queued already.
  process *tail = ready_queue_head;
  for(;;){
    if( tail == proc ) return;            // already in queue, nothing to do
    if( tail->queue_next == NULL ) break; // tail is the last element
    tail = tail->queue_next;
  }

  // append proc at the tail and mark it runnable.
  tail->queue_next = proc;
  proc->status = READY;
  proc->queue_next = NULL;
}
//
// choose a proc from the ready queue, and put it to run.
// note: schedule() does not take care of previous current process. If the current
// process is still runnable, you should place it into the ready queue (by calling
// ready_queue_insert), and then call schedule().
//
extern process procs[NPROC];
void schedule() {
if ( !ready_queue_head ){
// by default, if there are no ready process, and all processes are in the status of
// FREE and ZOMBIE, we should shutdown the emulated RISC-V machine.
int should_shutdown = 1;
for( int i=0; i<NPROC; i++ )
if( (procs[i].status != FREE) && (procs[i].status != ZOMBIE) ){
should_shutdown = 0;
sprint( "ready queue empty, but process %d is not in free/zombie state:%d\n",
i, procs[i].status );
}
if( should_shutdown ){
sprint( "no more ready processes, system shutdown now.\n" );
shutdown( 0 );
}else{
panic( "Not handled: we should let system wait for unfinished processes.\n" );
}
}
current = ready_queue_head;
assert( current->status == READY );
ready_queue_head = ready_queue_head->queue_next;
current->status = RUNNING;
sprint( "going to schedule process %d to run.\n", current->pid );
switch_to( current );
}

@ -0,0 +1,12 @@
#ifndef _SCHED_H_
#define _SCHED_H_
#include "process.h"
//length of a time slice, in number of ticks
#define TIME_SLICE_LEN 2
void insert_to_ready_queue( process* proc );
void schedule();
#endif

@ -8,6 +8,7 @@
#include "syscall.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "util/functions.h"
#include "spike_interface/spike_utils.h"

@ -12,6 +12,8 @@
#include "util/functions.h"
#include "pmm.h"
#include "vmm.h"
#include "sched.h"
#include "spike_interface/spike_utils.h"
//
@ -31,9 +33,10 @@ ssize_t sys_user_print(const char* buf, size_t n) {
//
ssize_t sys_user_exit(uint64 code) {
sprint("User exit with code:%d.\n", code);
// in lab1, PKE considers only one app (one process).
// therefore, shutdown the system when the app calls exit()
shutdown(code);
// reclaim the current process, and reschedule. added @lab3_1
free_process( current );
schedule();
return 0;
}
//
@ -41,8 +44,19 @@ ssize_t sys_user_exit(uint64 code) {
//
uint64 sys_user_allocate_page() {
void* pa = alloc_page();
uint64 va = g_ufree_page;
g_ufree_page += PGSIZE;
uint64 va;
// if there are previously reclaimed pages, use them first (this does not change the
// size of the heap)
if (current->user_heap.free_pages_count > 0) {
va = current->user_heap.free_pages_address[--current->user_heap.free_pages_count];
assert(va < current->user_heap.heap_top);
} else {
// otherwise, allocate a new page (this increases the size of the heap by one page)
va = current->user_heap.heap_top;
current->user_heap.heap_top += PGSIZE;
current->mapped_info[HEAP_SEGMENT].npages++;
}
user_vm_map((pagetable_t)current->pagetable, va, PGSIZE, (uint64)pa,
prot_to_type(PROT_WRITE | PROT_READ, 1));
@ -54,9 +68,19 @@ uint64 sys_user_allocate_page() {
//
//
// reclaim one heap page at virtual address va: unmap (and free) the backing
// physical page, then remember va on the per-process free list so a later
// sys_user_allocate_page can reuse it. always returns 0.
//
uint64 sys_user_free_page(uint64 va) {
  user_vm_unmap((pagetable_t)current->pagetable, va, PGSIZE, 1);
  // guard the fixed-capacity free list (free_pages_address has MAX_HEAP_PAGES
  // entries); without this check the store below writes past the array.
  if (current->user_heap.free_pages_count >= MAX_HEAP_PAGES)
    panic("sys_user_free_page: free page list overflow.\n");
  // add the reclaimed page to the free page list
  current->user_heap.free_pages_address[current->user_heap.free_pages_count++] = va;
  return 0;
}
//
// kernel entry point of naive_fork
//
ssize_t sys_user_fork() {
  sprint("User call fork.\n");
  // delegate to do_fork (kernel/process.c); the parent gets the child's pid,
  // while the child's trapframe is arranged so its fork() returns 0.
  ssize_t child_pid = do_fork( current );
  return child_pid;
}
//
// [a0]: the syscall number; [a1] ... [a7]: arguments to the syscalls.
// returns the code of success, (e.g., 0 means success, fail for otherwise)
@ -72,6 +96,8 @@ long do_syscall(long a0, long a1, long a2, long a3, long a4, long a5, long a6, l
return sys_user_allocate_page();
case SYS_user_free_page:
return sys_user_free_page(a1);
case SYS_user_fork:
return sys_user_fork();
default:
panic("Unknown syscall %ld \n", a0);
}

@ -11,6 +11,8 @@
// added @lab2_2
#define SYS_user_allocate_page (SYS_user_base + 2)
#define SYS_user_free_page (SYS_user_base + 3)
// added @lab3_1
#define SYS_user_fork (SYS_user_base + 4)
long do_syscall(long a0, long a1, long a2, long a3, long a4, long a5, long a6, long a7);

@ -187,3 +187,21 @@ void user_vm_unmap(pagetable_t page_dir, uint64 va, uint64 size, int free) {
panic( "You have to implement user_vm_unmap to free pages using naive_free in lab2_2.\n" );
}
//
// debug function, print the vm space of a process. added @lab3_1
//
//
// debug function, print the vm space of a process. added @lab3_1
// walks proc->mapped_info and prints each region's va, page count, segment
// type and backing physical address.
//
void print_proc_vmspace(process* proc) {
  sprint( "======\tbelow is the vm space of process%d\t========\n", proc->pid );
  for( int i=0; i<proc->total_mapped_region; i++ ){
    sprint( "-va:%lx, npage:%d, ", proc->mapped_info[i].va, proc->mapped_info[i].npages);
    switch(proc->mapped_info[i].seg_type){
      case CODE_SEGMENT: sprint( "type: CODE SEGMENT" ); break;
      case DATA_SEGMENT: sprint( "type: DATA SEGMENT" ); break;
      case STACK_SEGMENT: sprint( "type: STACK SEGMENT" ); break;
      case CONTEXT_SEGMENT: sprint( "type: TRAPFRAME SEGMENT" ); break;
      case SYSTEM_SEGMENT: sprint( "type: USER KERNEL STACK SEGMENT" ); break;
      // fix: HEAP_SEGMENT was missing, so the heap region (present in every
      // process since alloc_process sets total_mapped_region = 4) printed no type.
      case HEAP_SEGMENT: sprint( "type: HEAP SEGMENT" ); break;
      default: sprint( "type: UNKNOWN SEGMENT" ); break;
    }
    sprint( ", mapped to pa:%lx\n", lookup_pa(proc->pagetable, proc->mapped_info[i].va) );
  }
}

@ -2,6 +2,7 @@
#define _VMM_H_
#include "riscv.h"
#include "process.h"
/* --- utility functions for virtual address mapping --- */
int map_pages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm);
@ -30,5 +31,6 @@ void kern_vm_init(void);
void *user_va_to_pa(pagetable_t page_dir, void *va);
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm);
void user_vm_unmap(pagetable_t page_dir, uint64 va, uint64 size, int free);
void print_proc_vmspace(process* proc);
#endif

@ -0,0 +1,18 @@
/*
* The application of lab3_1.
* it simply forks a child process.
*/
#include "user/user_lib.h"
#include "util/types.h"
int main(void) {
  // fork returns the child's pid to the parent and 0 to the child,
  // so both processes run the code below with different pid values.
  uint64 pid = fork();
  if (pid != 0) {
    printu("Parent: Hello world! child id %ld\n", pid);
  } else {
    printu("Child: Hello world!\n");
  }
  exit(0);
}

@ -1,28 +0,0 @@
/*
* The application of lab2_3.
*/
#include "user_lib.h"
#include "util/types.h"
//
// compute the summation of an arithmetic sequence. for a given "n", compute
// result = n + (n-1) + (n-2) + ... + 0
// sum_sequence() calls itself recursively till 0. The recursive call, however,
// may consume more memory (from stack) than a physical 4KB page, leading to a page fault.
// PKE kernel needs to improved to handle such page fault by expanding the stack.
//
uint64 sum_sequence(uint64 n) {
  // sum n + (n-1) + ... + 0 by recursing down to zero. the recursion is kept
  // deliberately: each frame consumes user stack, which is what lets a large n
  // outgrow the single stack page and exercise the kernel's page-fault path.
  return (n == 0) ? 0 : n + sum_sequence(n - 1);
}
int main(void) {
  // we need a large enough "n" to trigger pagefaults in the user stack
  uint64 n = 1000;
  uint64 total = sum_sequence(1000);
  printu("Summation of an arithmetic sequence from 0 to %ld is: %ld \n", n, total );
  exit(0);
}

@ -63,3 +63,9 @@ void* naive_malloc() {
// lib call to naive_free: return a page previously obtained from
// naive_malloc back to the kernel via the SYS_user_free_page syscall.
void naive_free(void* va) {
do_user_call(SYS_user_free_page, (uint64)va, 0, 0, 0, 0, 0, 0);
}
//
// lib call to naive_fork
int fork() {
// trap into the kernel with SYS_user_fork; the kernel's do_fork arranges
// for the parent to receive the child's pid and the child to receive 0.
return do_user_call(SYS_user_fork, 0, 0, 0, 0, 0, 0, 0);
}

@ -6,3 +6,4 @@ int printu(const char *s, ...);
int exit(int code);
void* naive_malloc();
void naive_free(void* va);
int fork();

Loading…
Cancel
Save