parent 9c8cbfaa3b
commit 769c280d05
kernel/memlayout.h
@@ -0,0 +1,16 @@
#ifndef _MEMLAYOUT_H
#define _MEMLAYOUT_H

// RISC-V machine places its physical memory above DRAM_BASE
#define DRAM_BASE 0x80000000

// the beginning virtual address of PKE kernel
#define KERN_BASE 0x80000000

// default stack size
#define STACK_SIZE 4096

// virtual address of stack top of user process
#define USER_STACK_TOP 0x7ffff000

#endif
kernel/pmm.c
@@ -0,0 +1,88 @@
#include "pmm.h"
#include "util/functions.h"
#include "riscv.h"
#include "config.h"
#include "util/string.h"
#include "memlayout.h"
#include "spike_interface/spike_utils.h"

// _end is defined in kernel/kernel.lds, it marks the ending (virtual) address of PKE kernel
extern char _end[];
// g_mem_size is defined in spike_interface/spike_memory.c, it indicates the size of our
// (emulated) spike machine. g_mem_size's value is obtained when initializing HTIF.
extern uint64 g_mem_size;

static uint64 free_mem_start_addr;  // beginning address of free memory
static uint64 free_mem_end_addr;    // end address of free memory (exclusive)

typedef struct node {
  struct node *next;
} list_node;

// g_free_mem_list is the head of the list of free physical memory pages
static list_node g_free_mem_list;

//
// actually creates the freepage list. each page occupies 4KB (PGSIZE), i.e., a small page.
// PGSIZE is defined in kernel/riscv.h, ROUNDUP is defined in util/functions.h.
//
static void create_freepage_list(uint64 start, uint64 end) {
  g_free_mem_list.next = 0;
  for (uint64 p = ROUNDUP(start, PGSIZE); p + PGSIZE < end; p += PGSIZE)
    free_page( (void *)p );
}

//
// place the physical page at pa back onto the free list g_free_mem_list (i.e., reclaim the page)
//
void free_page(void *pa) {
  if (((uint64)pa % PGSIZE) != 0 || (uint64)pa < free_mem_start_addr || (uint64)pa >= free_mem_end_addr)
    panic("free_page 0x%lx \n", pa);

  // insert the physical page at the head of g_free_mem_list
  list_node *n = (list_node *)pa;
  n->next = g_free_mem_list.next;
  g_free_mem_list.next = n;
}

//
// takes the first free page from g_free_mem_list, and returns (allocates) it.
// Allocates only ONE page!
//
void *alloc_page(void) {
  list_node *n = g_free_mem_list.next;
  if (n) g_free_mem_list.next = n->next;

  return (void *)n;
}

//
// pmm_init() establishes the list of free physical pages according to the available
// physical memory space.
//
void pmm_init() {
  // start of kernel program segment
  uint64 g_kernel_start = KERN_BASE;
  uint64 g_kernel_end = (uint64)&_end;

  uint64 pke_kernel_size = g_kernel_end - g_kernel_start;
  sprint("PKE kernel start 0x%lx, PKE kernel end: 0x%lx, PKE kernel size: 0x%lx .\n",
    g_kernel_start, g_kernel_end, pke_kernel_size);

  // free memory starts from the end of PKE kernel and must be page-aligned
  free_mem_start_addr = ROUNDUP(g_kernel_end, PGSIZE);

  // recompute g_mem_size to limit the physical memory space that our riscv-pke kernel
  // needs to manage
  g_mem_size = MIN(PKE_MAX_ALLOWABLE_RAM, g_mem_size);
  if (g_mem_size < pke_kernel_size)
    panic("Error when recomputing physical memory size (g_mem_size).\n");

  free_mem_end_addr = g_mem_size + DRAM_BASE;
  sprint("free physical memory address: [0x%lx, 0x%lx] \n", free_mem_start_addr,
    free_mem_end_addr - 1);

  sprint("kernel memory manager is initializing ...\n");
  // create the list of free pages
  create_freepage_list(free_mem_start_addr, free_mem_end_addr);
}
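
A minimal usage sketch of the free-list allocator above (illustrative only, not part of the commit; assumes it runs after pmm_init() has built the free list):

  // allocate one free 4KB physical page, zero it, use it, then reclaim it
  void *pa = alloc_page();
  if (pa == 0) panic("no free physical page available.\n");
  memset(pa, 0, PGSIZE);   // pages come off the free list with stale contents
  // ... use the page ...
  free_page(pa);           // pushes the page back onto g_free_mem_list
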
kernel/pmm.h
@@ -0,0 +1,11 @@
#ifndef _PMM_H_
#define _PMM_H_

// Initialize the physical memory manager
void pmm_init();
// Allocate a free physical page
void* alloc_page();
// Free an allocated page
void free_page(void* pa);

#endif
kernel/vmm.c
@@ -0,0 +1,173 @@
/*
 * virtual address mapping related functions.
 */

#include "vmm.h"
#include "riscv.h"
#include "pmm.h"
#include "util/types.h"
#include "memlayout.h"
#include "util/string.h"
#include "spike_interface/spike_utils.h"
#include "util/functions.h"

/* --- utility functions for virtual address mapping --- */
//
// establish mapping of virtual address [va, va+size] to physical address [pa, pa+size]
// with the permission of "perm".
//
int map_pages(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
  uint64 first, last;
  pte_t *pte;

  for (first = ROUNDDOWN(va, PGSIZE), last = ROUNDDOWN(va + size - 1, PGSIZE);
       first <= last; first += PGSIZE, pa += PGSIZE) {
    if ((pte = page_walk(page_dir, first, 1)) == 0) return -1;
    if (*pte & PTE_V)
      panic("map_pages fails on mapping va (0x%lx) to pa (0x%lx)", first, pa);
    *pte = PA2PTE(pa) | perm | PTE_V;
  }
  return 0;
}

//
// convert permission code to permission types of a PTE
//
uint64 prot_to_type(int prot, int user) {
  uint64 perm = 0;
  if (prot & PROT_READ) perm |= PTE_R | PTE_A;
  if (prot & PROT_WRITE) perm |= PTE_W | PTE_D;
  if (prot & PROT_EXEC) perm |= PTE_X | PTE_A;
  if (perm == 0) perm = PTE_R;
  if (user) perm |= PTE_U;
  return perm;
}

//
// traverse the page table (starting from page_dir) to find the corresponding pte of va.
// returns: the PTE (page table entry) pointing to va.
//
pte_t *page_walk(pagetable_t page_dir, uint64 va, int alloc) {
  if (va >= MAXVA) panic("page_walk");

  // starting from the page directory
  pagetable_t pt = page_dir;

  // traverse from the page directory down to the page table.
  // as we use the risc-v sv39 paging scheme, there are 3 levels: page dir,
  // page medium dir, and page table.
  for (int level = 2; level > 0; level--) {
    // macro "PX" gets the PTE index in the page table of the current level
    // "pte" points to the entry of the current level
    pte_t *pte = pt + PX(level, va);

    // now, we need to know whether the above pte is valid (i.e., already maps a
    // physical page) or not.
    if (*pte & PTE_V) {  // PTE valid
      // physical address of the next-level page table
      pt = (pagetable_t)PTE2PA(*pte);
    } else {  // PTE invalid (does not exist).
      // allocate a page (to be the new page table), if alloc != 0
      if (alloc && ((pt = (pte_t *)alloc_page()) != 0)) {
        memset(pt, 0, PGSIZE);
        // write the physical address of the newly allocated page into the pte, to
        // establish the page table tree.
        *pte = PA2PTE(pt) | PTE_V;
      } else  // return NULL, if alloc == 0, or no more physical pages remain
        return 0;
    }
  }

  // return the PTE that contains the physical address of the page
  return pt + PX(0, va);
}

//
// look up a virtual page address, return the physical page address or 0 if not mapped.
//
uint64 lookup_pa(pagetable_t pagetable, uint64 va) {
  pte_t *pte;
  uint64 pa;

  if (va >= MAXVA) return 0;

  pte = page_walk(pagetable, va, 0);
  if (pte == 0 || (*pte & PTE_V) == 0 || ((*pte & PTE_R) == 0 && (*pte & PTE_W) == 0))
    return 0;
  pa = PTE2PA(*pte);

  return pa;
}

/* --- kernel page table part --- */
// _etext is defined in kernel.lds, it points to the address after the text and rodata segments.
extern char _etext[];

// pointer to the kernel page directory
pagetable_t g_kernel_pagetable;

//
// maps virtual address [va, va+sz] to [pa, pa+sz] (for the kernel).
//
void kern_vm_map(pagetable_t page_dir, uint64 va, uint64 pa, uint64 sz, int perm) {
  // map_pages is defined in kernel/vmm.c
  if (map_pages(page_dir, va, sz, pa, perm) != 0) panic("kern_vm_map");
}

//
// kern_vm_init() constructs the kernel page table.
//
void kern_vm_init(void) {
  // pagetable_t is defined in kernel/riscv.h. it's actually uint64*
  pagetable_t t_page_dir;

  // allocate a page (t_page_dir) to be the kernel page directory. alloc_page is defined in kernel/pmm.c
  t_page_dir = (pagetable_t)alloc_page();
  // memset is defined in util/string.c
  memset(t_page_dir, 0, PGSIZE);

  // map virtual address [KERN_BASE, _etext] to physical address [DRAM_BASE, DRAM_BASE+(_etext - KERN_BASE)],
  // to maintain the (direct) kernel address mapping of the text section.
  kern_vm_map(t_page_dir, KERN_BASE, DRAM_BASE, (uint64)_etext - KERN_BASE,
              prot_to_type(PROT_READ | PROT_EXEC, 0));

  sprint("KERN_BASE 0x%lx\n", lookup_pa(t_page_dir, KERN_BASE));

  // also (direct) map the remaining address space, to make it accessible from the kernel.
  // this is important when the kernel needs to access the memory content of a user's app
  // without copying pages between kernel and user spaces.
  kern_vm_map(t_page_dir, (uint64)_etext, (uint64)_etext, PHYS_TOP - (uint64)_etext,
              prot_to_type(PROT_READ | PROT_WRITE, 0));

  sprint("physical address of _etext is: 0x%lx\n", lookup_pa(t_page_dir, (uint64)_etext));

  g_kernel_pagetable = t_page_dir;
}

/* --- user page table part --- */
//
// convert and return the corresponding physical address of a virtual address (va) of
// an application.
//
void *user_va_to_pa(pagetable_t page_dir, void *va) {
  // TODO (lab2_1): implement user_va_to_pa to convert a given user virtual address "va"
  // to its corresponding physical address, i.e., "pa". To do it, we need to walk
  // through the page table, starting from its directory "page_dir", to locate the PTE
  // that maps "va". If found, return the "pa" by using:
  // pa = PHYS_ADDR(PTE) + (va & ((1 << PGSHIFT) - 1))
  // Here, PHYS_ADDR() means retrieving the starting address of the page (4KB aligned), and
  // (va & ((1 << PGSHIFT) - 1)) computes the offset of "va" inside its page.
  // Also, it is possible that "va" is not mapped at all. In such a case, we will find an
  // invalid PTE, and should return NULL.
  panic( "You have to implement user_va_to_pa (convert user va to pa) to print messages in lab2_1.\n" );

}

//
// maps virtual address [va, va+sz] to [pa, pa+sz] (for a user application).
//
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm) {
  if (map_pages(page_dir, va, size, pa, perm) != 0) {
    panic("fail to user_vm_map .\n");
  }
}
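
For the lab2_1 TODO in user_va_to_pa above, one possible sketch that follows the hint comment (an assumption about the intended shape, not the official solution):

  pte_t *pte = page_walk(page_dir, (uint64)va, 0);      // look up only, do not allocate
  if (pte == 0 || (*pte & PTE_V) == 0) return 0;        // "va" is not mapped
  uint64 offset = (uint64)va & ((1UL << PGSHIFT) - 1);  // offset of "va" inside its 4KB page
  return (void *)(PTE2PA(*pte) + offset);               // page start (from the PTE) + offset
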
kernel/vmm.h
@@ -0,0 +1,33 @@
#ifndef _VMM_H_
#define _VMM_H_

#include "riscv.h"

/* --- utility functions for virtual address mapping --- */
int map_pages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm);
// permission codes.
enum VMPermision {
  PROT_NONE = 0,
  PROT_READ = 1,
  PROT_WRITE = 2,
  PROT_EXEC = 4,
};

uint64 prot_to_type(int prot, int user);
pte_t *page_walk(pagetable_t pagetable, uint64 va, int alloc);
uint64 lookup_pa(pagetable_t pagetable, uint64 va);

/* --- kernel page table --- */
// pointer to the kernel page directory
extern pagetable_t g_kernel_pagetable;

void kern_vm_map(pagetable_t page_dir, uint64 va, uint64 pa, uint64 sz, int perm);

// Initialize the kernel pagetable
void kern_vm_init(void);

/* --- user page table --- */
void *user_va_to_pa(pagetable_t page_dir, void *va);
void user_vm_map(pagetable_t page_dir, uint64 va, uint64 size, uint64 pa, int perm);

#endif
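
As a usage illustration of the declarations above (hypothetical call site, not part of the commit; "page_dir" stands for some user page directory):

  // map one user page just below USER_STACK_TOP, backed by a freshly allocated physical page
  void *pa = alloc_page();
  user_vm_map(page_dir, USER_STACK_TOP - PGSIZE, PGSIZE, (uint64)pa,
              prot_to_type(PROT_READ | PROT_WRITE, 1));  // readable + writable, user-accessible
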
@@ -0,0 +1,12 @@
/*
 * Below is the given application for lab2_1.
 * This app runs in its own address space, in contrast with running under the
 * direct mapping used in earlier labs.
 */

#include "user_lib.h"
#include "util/types.h"

int main(void) {
  printu("Hello world!\n");
  exit(0);
}
@@ -1,20 +0,0 @@
/*
 * Below is the given application for lab1_3.
 * This app performs a long loop, during which timer interrupts are
 * generated and print messages to our screen.
 */

#include "user_lib.h"
#include "util/types.h"

int main(void) {
  printu("Hello world!\n");
  int i;
  for (i = 0; i < 100000000; ++i) {
    if (i % 5000000 == 0) printu("wait %d\n", i);
  }

  exit(0);

  return 0;
}
@@ -1,14 +0,0 @@
OUTPUT_ARCH( "riscv" )

ENTRY(main)

SECTIONS
{
  . = 0x81000000;
  . = ALIGN(0x1000);
  .text : { *(.text) }
  . = ALIGN(16);
  .data : { *(.data) }
  . = ALIGN(16);
  .bss : { *(.bss) }
}