Impl page table for RV32. TODO: Fix recursive mapping.

master
WangRunji 6 years ago
parent 6b819d62e4
commit 677c1bd565

@@ -4,7 +4,7 @@ kern_entry:
     la sp, bootstacktop
     tail rust_main
-    .section .data
+    .section .bss
     .align 12 #PGSHIFT
     .global bootstack
 bootstack:

@@ -12,43 +12,41 @@ SECTIONS
 {
     /* Load the kernel at this address: "." means the current address */
     . = BASE_ADDRESS;
+    start = .;
     .text : {
+        stext = .;
         *(.entry)
         *(.text .stub .text.* .gnu.linkonce.t.*)
+        . = ALIGN(4K);
+        etext = .;
     }
-    PROVIDE(etext = .); /* Define the 'etext' symbol to this value */
     .rodata : {
+        srodata = .;
         *(.rodata .rodata.* .gnu.linkonce.r.*)
+        . = ALIGN(4K);
+        erodata = .;
     }
-    /* Adjust the address for the data segment to the next page */
-    . = ALIGN(0x1000);
-    /* The data segment */
     .data : {
-        *(.data)
-        *(.data.*)
+        sdata = .;
+        *(.data .data.*)
+        . = ALIGN(4K);
+        edata = .;
     }
-    .sdata : {
-        *(.sdata)
-        *(.sdata.*)
+    .bss : {
+        sbss = .;
+        *(.bss .bss.* .sbss*)
+        . = ALIGN(4K);
+        ebss = .;
     }
-    PROVIDE(edata = .);
-    .bss : {
-        *(.bss)
-        *(.bss.*)
-        *(.sbss*)
+    .got : {
+        *(.got .got.*)
+        . = ALIGN(4K);
     }
     PROVIDE(end = .);
-    /DISCARD/ : {
-        *(.eh_frame .note.GNU-stack)
-    }
 }

@@ -0,0 +1,65 @@
use core::slice;
use memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Stack};
use super::riscv::addr::*;
use ucore_memory::PAGE_SIZE;

pub fn init() {
    #[repr(align(4096))]
    struct PageData([u8; PAGE_SIZE]);
    static PAGE_TABLE_ROOT: PageData = PageData([0; PAGE_SIZE]);

    let frame = Frame::of_addr(PhysAddr::new(&PAGE_TABLE_ROOT as *const _ as u32));
    super::paging::setup_page_table(frame);
    init_frame_allocator();
    remap_the_kernel();
    init_heap();
}

fn init_frame_allocator() {
    use bit_allocator::BitAlloc;
    use core::ops::Range;
    use consts::{MEMORY_OFFSET, MEMORY_END};

    let mut ba = FRAME_ALLOCATOR.lock();
    ba.insert(to_range(end as usize, MEMORY_END));
    info!("FrameAllocator init end");

    fn to_range(start: usize, end: usize) -> Range<usize> {
        let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
        let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
        page_start..page_end
    }
}

fn remap_the_kernel() {
    let kstack = Stack {
        top: bootstacktop as usize,
        bottom: bootstack as usize + PAGE_SIZE,
    };
    static mut SPACE: [u8; 0x1000] = [0; 0x1000];
    let mut ms = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
    ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
    ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
    ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
    ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
    unsafe { ms.activate(); }
    info!("kernel remap end");
}

// Symbols provided by linker script
extern {
    fn stext();
    fn etext();
    fn sdata();
    fn edata();
    fn srodata();
    fn erodata();
    fn sbss();
    fn ebss();
    fn start();
    fn end();
    fn bootstack();
    fn bootstacktop();
}
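
A note on the frame-range arithmetic above: `to_range` converts a physical byte range into frame indices relative to `MEMORY_OFFSET`, rounding the end up so the last partially covered page still gets an index. A standalone sketch of the same arithmetic, not part of the commit (the kernel-end address below is made up for illustration):

// Mirrors to_range() in init_frame_allocator(); constants match the RV32 values in this commit.
const MEMORY_OFFSET: usize = 0x8000_0000;
const MEMORY_END: usize = 0x8080_0000;
const PAGE_SIZE: usize = 4096;

fn to_range(start: usize, end: usize) -> core::ops::Range<usize> {
    let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
    let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1; // round the end up
    page_start..page_end
}

fn main() {
    // If the kernel image ended at 0x8042_1234 (hypothetical), the allocator would
    // receive frames [0x421, 0x800), i.e. physical [0x8042_1000, 0x8080_0000).
    assert_eq!(to_range(0x8042_1234, MEMORY_END), 0x421..0x800);
}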

@@ -1,28 +1,16 @@
-extern crate riscv;
 extern crate bbl;
+extern crate riscv;
 pub mod serial;
 pub mod interrupt;
 pub mod timer;
 pub mod paging;
+pub mod memory;
 pub fn init() {
     println!("Hello RISCV! {}", 123);
     interrupt::init();
+    memory::init();
     // timer::init();
-    println!("satp: {:x?}", riscv::register::satp::read());
-    use xmas_elf::ElfFile;
-    use core::slice;
-    use self::riscv::addr::*;
-    let begin = 0x80400000usize;
-    extern { fn end(); }
-    let end = end as usize;
-    println!("Kernel: {:#x} {:#x}", begin, end);
-    // let kernel = unsafe{ slice::from_raw_parts(begin as *const u8, end - begin) };
-    // let elf = ElfFile::new(kernel).unwrap();
-    paging::setup_page_table(Frame::of_addr(PhysAddr::new(end as u32 + 4096)));
     loop {}
 }

@@ -1,20 +1,245 @@
-pub use super::riscv::paging::*;
-pub use super::riscv::addr::*;
+use consts::{KERNEL_PML4, RECURSIVE_PAGE_PML4};
+// Depends on kernel
+use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
+use super::riscv::addr::*;
+use super::riscv::asm::{sfence_vma, sfence_vma_all};
+use super::riscv::paging::{Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
+use super::riscv::paging::{FrameAllocator, FrameDeallocator};
+use super::riscv::register::satp;
+use ucore_memory::memory_set::*;
+use ucore_memory::PAGE_SIZE;
+use ucore_memory::paging::*;
 // need 1 page
 pub fn setup_page_table(frame: Frame) {
-    let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut PageTable) };
+    let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut RvPageTable) };
     p2.zero();
-    use self::PageTableFlags as F;
-    use consts::{KERNEL_PML4, RECURSIVE_PAGE_PML4};
-    // Set recursive map
-    p2[RECURSIVE_PAGE_PML4].set(frame.clone(), F::VALID);
+    p2.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
     // Set kernel identity map
-    p2[KERNEL_PML4].set(Frame::of_addr(PhysAddr::new((KERNEL_PML4 as u32) << 22)), F::VALID | F::READABLE | F::WRITABLE | F::EXCUTABLE);
-    p2[KERNEL_PML4 + 1].set(Frame::of_addr(PhysAddr::new((KERNEL_PML4 as u32 + 1) << 22)), F::VALID | F::READABLE | F::WRITABLE | F::EXCUTABLE);
+    p2[KERNEL_PML4].set(Frame::of_addr(PhysAddr::new((KERNEL_PML4 as u32) << 22)), EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
+    p2[KERNEL_PML4 + 1].set(Frame::of_addr(PhysAddr::new((KERNEL_PML4 as u32 + 1) << 22)), EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
     use super::riscv::register::satp;
     unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
-    println!("New page table");
+    sfence_vma_all();
+    info!("setup init page table end");
 }
pub struct ActivePageTable(RecursivePageTable<'static>);

pub struct PageEntry(PageTableEntry);

impl PageTable for ActivePageTable {
    type Entry = PageEntry;

    fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
        let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
        let page = Page::of_addr(VirtAddr::new(addr));
        let frame = Frame::of_addr(PhysAddr::new(target as u32));
        self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
            .unwrap().flush();
        self.get_entry(addr)
    }

    fn unmap(&mut self, addr: usize) {
        let page = Page::of_addr(VirtAddr::new(addr));
        let (frame, flush) = self.0.unmap(page).unwrap();
        flush.flush();
    }

    fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
        let page = Page::of_addr(VirtAddr::new(addr));
        assert!(self.0.translate_page(page).is_some(), "page table entry not exist");
        let entry_addr = ((addr >> 10) & 0x003ffffc) | (RECURSIVE_PAGE_PML4 << 22);
        unsafe { &mut *(entry_addr as *mut PageEntry) }
    }

    fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
        use core::slice;
        unsafe { slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
    }

    fn read(&mut self, addr: usize) -> u8 {
        unsafe { *(addr as *const u8) }
    }

    fn write(&mut self, addr: usize, data: u8) {
        unsafe { *(addr as *mut u8) = data; }
    }
}
impl ActivePageTable {
    pub unsafe fn new() -> Self {
        let root_addr = ((RECURSIVE_PAGE_PML4 << 10) | (RECURSIVE_PAGE_PML4 + 1)) << 12;
        println!("{:x?}", &*(root_addr as *const RvPageTable));
        ActivePageTable(RecursivePageTable::new(&mut *(root_addr as *mut _)).unwrap())
    }

    fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
        // Create a temporary page
        let page = Page::of_addr(VirtAddr::new(0xcafebabe));
        assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
        // Map it to table
        self.map(page.start_address().as_usize(), frame.start_address().as_u32() as usize);
        // Call f
        let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
        f(self, table);
        // Unmap the page
        self.unmap(0xcafebabe);
    }
}
impl Entry for PageEntry {
    fn update(&mut self) {
        let addr = VirtAddr::new((self as *const _ as usize) << 10);
        sfence_vma(0, addr);
    }

    fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
    fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
    fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
    fn present(&self) -> bool { self.0.flags().contains(EF::VALID | EF::READABLE) }
    fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
    fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); }
    fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); }
    fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID | EF::READABLE, value); }
    fn target(&self) -> usize { self.0.addr().as_u32() as usize }

    fn set_target(&mut self, target: usize) {
        let flags = self.0.flags();
        let frame = Frame::of_addr(PhysAddr::new(target as u32));
        self.0.set(frame, flags);
    }

    fn writable_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
    fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED2) }

    fn set_shared(&mut self, writable: bool) {
        let flags = self.as_flags();
        flags.set(EF::RESERVED1, writable);
        flags.set(EF::RESERVED2, !writable);
    }

    fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); }
    fn swapped(&self) -> bool { unimplemented!() }
    fn set_swapped(&mut self, value: bool) { unimplemented!() }
    fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
    fn set_user(&mut self, value: bool) { self.as_flags().set(EF::USER, value); }
    fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
    fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::EXECUTABLE, value); }
}

impl PageEntry {
    fn as_flags(&mut self) -> &mut EF {
        unsafe { &mut *(self as *mut _ as *mut EF) }
    }
}
#[derive(Debug)]
pub struct InactivePageTable0 {
    p2_frame: Frame,
}

impl InactivePageTable for InactivePageTable0 {
    type Active = ActivePageTable;

    fn new() -> Self {
        let mut pt = Self::new_bare();
        pt.map_kernel();
        pt
    }

    fn new_bare() -> Self {
        let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
            .expect("failed to allocate frame");
        debug!("begin");
        active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
            debug!("begin1");
            table.zero();
            table.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
            debug!("begin2");
        });
        InactivePageTable0 { p2_frame: frame }
    }

    fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
        active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
            let backup = p2_table[RECURSIVE_PAGE_PML4].clone();
            // overwrite recursive mapping
            p2_table[RECURSIVE_PAGE_PML4].set(self.p2_frame.clone(), EF::VALID);
            sfence_vma_all();
            // execute f in the new context
            f(active_table);
            // restore recursive mapping to original p2 table
            p2_table[RECURSIVE_PAGE_PML4] = backup;
            sfence_vma_all();
        });
    }

    unsafe fn activate(&self) {
        let old_frame = satp::read().frame();
        let new_frame = self.p2_frame.clone();
        debug!("switch table {:?} -> {:?}", old_frame, new_frame);
        if old_frame != new_frame {
            satp::set(satp::Mode::Sv32, 0, new_frame);
        }
    }

    unsafe fn with(&self, f: impl FnOnce()) {
        let old_frame = satp::read().frame();
        let new_frame = self.p2_frame.clone();
        debug!("switch table {:?} -> {:?}", old_frame, new_frame);
        if old_frame != new_frame {
            satp::set(satp::Mode::Sv32, 0, new_frame);
        }
        f();
        debug!("switch table {:?} -> {:?}", new_frame, old_frame);
        if old_frame != new_frame {
            satp::set(satp::Mode::Sv32, 0, old_frame);
        }
    }

    fn alloc_frame() -> Option<usize> {
        alloc_frame()
    }

    fn dealloc_frame(target: usize) {
        dealloc_frame(target)
    }

    fn alloc_stack(size_in_pages: usize) -> Stack {
        alloc_stack(size_in_pages)
    }
}
impl InactivePageTable0 {
    fn map_kernel(&mut self) {
        let mut table = unsafe { &mut *(0xfffff000 as *mut RvPageTable) };
        let e1 = table[KERNEL_PML4].clone();
        let e2 = table[KERNEL_PML4 + 1].clone();
        self.edit(|_| {
            table[KERNEL_PML4] = e1;
            table[KERNEL_PML4 + 1] = e2;
        });
    }
}

impl Drop for InactivePageTable0 {
    fn drop(&mut self) {
        info!("PageTable dropping: {:?}", self);
        Self::dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
    }
}

struct FrameAllocatorForRiscv;

impl FrameAllocator for FrameAllocatorForRiscv {
    fn alloc(&mut self) -> Option<Frame> {
        alloc_frame().map(|addr| Frame::of_addr(PhysAddr::new(addr as u32)))
    }
}

impl FrameDeallocator for FrameAllocatorForRiscv {
    fn dealloc(&mut self, frame: Frame) {
        dealloc_frame(frame.start_address().as_u32() as usize);
    }
}
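
The address arithmetic in `get_entry` and `ActivePageTable::new` depends on the Sv32 recursive-mapping trick: once the root (P2) table maps slot `RECURSIVE_PAGE_PML4` back onto itself, the level-0 PTE of any virtual address becomes visible at `(R << 22) | (VPN1 << 12) | (VPN0 * 4)`. A small sketch of that equivalence, not part of the commit (R = 0x3fe as set above, example address arbitrary):

// Sv32 recursive-mapping arithmetic behind get_entry() above.
const R: usize = 0x3fe; // recursive slot in the root (P2) table, as in this commit

// Virtual address at which the leaf PTE for `addr` appears, assuming root[R]
// points back at the root table itself.
fn pte_vaddr(addr: usize) -> usize {
    let vpn1 = (addr >> 22) & 0x3ff; // index into the root (P2) table
    let vpn0 = (addr >> 12) & 0x3ff; // index into the leaf (P1) table
    (R << 22) | (vpn1 << 12) | (vpn0 * 4) // PTEs are 4 bytes in Sv32
}

fn main() {
    let addr = 0x8042_3000; // arbitrary example page
    // Closed form used by get_entry():
    let entry_addr = ((addr >> 10) & 0x003f_fffc) | (R << 22);
    assert_eq!(pte_vaddr(addr), entry_addr); // both give 0xffa0_108c
}

The commit's TODO notes the recursive mapping itself still needs fixing; the sketch only shows how the entry address is derived once the recursive slot is in place.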

@@ -14,11 +14,17 @@ mod riscv {
     // [0x80000000, 0x80800000]
     const P2_SIZE: usize = 1 << 22;
    const P2_MASK: usize = 0x3ff << 22;
-    pub const RECURSIVE_PAGE_PML4: usize = 0x3ff;
+    pub const RECURSIVE_PAGE_PML4: usize = 0x3fe;
     pub const KERNEL_OFFSET: usize = 0;
     pub const KERNEL_PML4: usize = 0x8040_0000 >> 22;
-    pub const KERNEL_HEAP_OFFSET: usize = 0x8000_0000;
-    pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
+    pub const KERNEL_HEAP_OFFSET: usize = 0x8050_0000;
+    pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
+    pub const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
+    pub const KERNEL_STACK_SIZE: usize = 64 * 1024; // 64 KB
+    pub const MEMORY_OFFSET: usize = 0x8000_0000;
+    pub const MEMORY_END: usize = 0x8080_0000;
 }
 #[cfg(target_arch = "x86_64")]
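
For orientation, the new RV32 constants imply the following layout; the figures below are just the arithmetic spelled out, not part of the commit:

// Layout implied by the RV32 constants above (illustrative only).
fn main() {
    const KERNEL_PML4: usize = 0x8040_0000 >> 22; // = 0x201, P2 index of the kernel megapage
    const KERNEL_HEAP_OFFSET: usize = 0x8050_0000;
    const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
    const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
    const KERNEL_STACK_SIZE: usize = 64 * 1024; // 64 KB

    assert_eq!(KERNEL_PML4, 0x201);
    // setup_page_table() identity-maps two 4 MiB slots starting at this address:
    assert_eq!(KERNEL_PML4 << 22, 0x8040_0000);
    assert_eq!((KERNEL_PML4 + 2) << 22, 0x80c0_0000);
    // Heap occupies [0x8050_0000, 0x8060_0000); the stack pool starts right after it.
    assert_eq!(KERNEL_STACK_OFFSET, 0x8060_0000);
    assert_eq!(KERNEL_STACK_OFFSET + KERNEL_STACK_SIZE, 0x8061_0000);
}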
@@ -51,7 +57,9 @@
     pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
     pub const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
-    pub const KERNEL_STACK_SIZE: usize = 1 * 1024 * 1024; // 8 MB
+    pub const KERNEL_STACK_SIZE: usize = 1 * 1024 * 1024; // 1 MB
+    pub const MEMORY_OFFSET: usize = 0;
     /// Offset to kernel percpu variables
     //TODO: Use 64-bit fs offset to enable this
     pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;

@@ -67,7 +67,6 @@ mod io;
 #[path = "io/riscv_io.rs"]
 mod io;
-#[cfg(target_arch = "x86_64")]
 mod memory;
 mod lang;
 mod util;

@@ -1,10 +1,13 @@
 pub use arch::paging::*;
 use bit_allocator::{BitAlloc, BitAlloc64K};
+use consts::MEMORY_OFFSET;
 use self::stack_allocator::*;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
-use ucore_memory::{*, cow::CowExt, paging::PageTable};
+use ucore_memory::{*, paging::PageTable};
 pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
+#[cfg(target_arch = "x86_64")]
+use ucore_memory::paging::CowExt;
 pub type MemorySet = MemorySet_<InactivePageTable0>;
@@ -16,11 +19,11 @@ lazy_static! {
 pub static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
 pub fn alloc_frame() -> Option<usize> {
-    FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE)
+    FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET)
 }
 pub fn dealloc_frame(target: usize) {
-    FRAME_ALLOCATOR.lock().dealloc(target / PAGE_SIZE);
+    FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
 }
 pub fn alloc_stack(size_in_pages: usize) -> Stack {
@@ -29,24 +32,44 @@ pub fn alloc_stack(size_in_pages: usize) -> Stack {
         .alloc_stack(size_in_pages).expect("no more stack")
 }
+#[cfg(target_arch = "x86_64")]
 lazy_static! {
     static ref ACTIVE_TABLE: Mutex<CowExt<ActivePageTable>> = Mutex::new(unsafe {
         CowExt::new(ActivePageTable::new())
     });
 }
+#[cfg(target_arch = "riscv")]
+lazy_static! {
+    static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
+        ActivePageTable::new()
+    });
+}
 /// The only way to get active page table
+#[cfg(target_arch = "x86_64")]
 pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
     ACTIVE_TABLE.lock()
 }
+#[cfg(target_arch = "riscv")]
+pub fn active_table() -> MutexGuard<'static, ActivePageTable> {
+    ACTIVE_TABLE.lock()
+}
 // Return true to continue, false to halt
+#[cfg(target_arch = "x86_64")]
 pub fn page_fault_handler(addr: usize) -> bool {
     // Handle copy on write
     unsafe { ACTIVE_TABLE.force_unlock(); }
     active_table().page_fault_handler(addr, || alloc_frame().unwrap())
 }
+#[cfg(target_arch = "riscv")]
+pub fn page_fault_handler(addr: usize) -> bool {
+    false
+}
 pub fn init_heap() {
     use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE, KERNEL_STACK_OFFSET, KERNEL_STACK_SIZE};
