diff --git a/os/Cargo.toml b/os/Cargo.toml
index 24c884a8..2ccd1f86 100644
--- a/os/Cargo.toml
+++ b/os/Cargo.toml
@@ -11,6 +11,7 @@ riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
 lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
 buddy_system_allocator = "0.6"
 spin = "0.7.0"
+bitflags = "1.2.1"
 
 [features]
 board_qemu = []
diff --git a/os/build.rs b/os/build.rs
index 52eb20cb..e5a0a46e 100644
--- a/os/build.rs
+++ b/os/build.rs
@@ -29,11 +29,9 @@ _num_app:
     .quad {}"#, apps.len())?;
 
     for i in 0..apps.len() {
-        writeln!(f, r#"
-    .quad app_{}_start"#, i)?;
+        writeln!(f, r#"    .quad app_{}_start"#, i)?;
     }
-    writeln!(f, r#"
-    .quad app_{}_end"#, apps.len() - 1)?;
+    writeln!(f, r#"    .quad app_{}_end"#, apps.len() - 1)?;
 
     for (idx, app) in apps.iter().enumerate() {
         println!("app_{}: {}", idx, app);
diff --git a/os/src/linker.ld b/os/src/linker.ld
index 8a24ad14..b4b2eb7b 100644
--- a/os/src/linker.ld
+++ b/os/src/linker.ld
@@ -29,6 +29,7 @@ SECTIONS
 
     . = ALIGN(4K);
     edata = .;
+    sbss_with_stack = .;
     .bss : {
         *(.bss.stack)
         sbss = .;
diff --git a/os/src/main.rs b/os/src/main.rs
index 9955320f..45ea849e 100644
--- a/os/src/main.rs
+++ b/os/src/main.rs
@@ -8,6 +8,9 @@
 
 extern crate alloc;
 
+#[macro_use]
+extern crate bitflags;
+
 #[macro_use]
 mod console;
 mod lang_items;
@@ -38,6 +41,8 @@ pub fn rust_main() -> ! {
     clear_bss();
     println!("[kernel] Hello, world!");
     mm::init();
+    println!("[kernel] back to world!");
+    mm::remap_test();
     loop {}
     trap::init();
     loader::load_apps();
diff --git a/os/src/mm/address.rs b/os/src/mm/address.rs
index 81af1643..ca29987f 100644
--- a/os/src/mm/address.rs
+++ b/os/src/mm/address.rs
@@ -1,45 +1,72 @@
-/// T: {PhysAddr, VirtAddr, PhysPageNum, VirtPageNum}
-/// T -> usize: T.0
-/// usize -> T: usize.into()
 use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
+use super::PageTableEntry;
+use core::fmt::{self, Debug, Formatter};
 
 /// Definitions
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
 pub struct PhysAddr(pub usize);
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
 pub struct VirtAddr(pub usize);
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
 pub struct PhysPageNum(pub usize);
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
 pub struct VirtPageNum(pub usize);
 
+/// Debugging
+
+impl Debug for VirtAddr {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("VA:{:#x}", self.0))
+    }
+}
+impl Debug for VirtPageNum {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("VPN:{:#x}", self.0))
+    }
+}
+impl Debug for PhysAddr {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("PA:{:#x}", self.0))
+    }
+}
+impl Debug for PhysPageNum {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.write_fmt(format_args!("PPN:{:#x}", self.0))
+    }
+}
+
+/// T: {PhysAddr, VirtAddr, PhysPageNum, VirtPageNum}
+/// T -> usize: T.0
+/// usize -> T: usize.into()
+
 impl From<usize> for PhysAddr {
     fn from(v: usize) -> Self { Self(v) }
 }
 impl From<usize> for PhysPageNum {
     fn from(v: usize) -> Self { Self(v) }
 }
-impl From<PhysAddr> for usize {
-    fn from(v: PhysAddr) -> Self { v.0 }
-}
-impl From<PhysPageNum> for usize {
-    fn from(v: PhysPageNum) -> Self { v.0 }
-}
 impl From<usize> for VirtAddr {
     fn from(v: usize) -> Self { Self(v) }
 }
 impl From<usize> for VirtPageNum {
     fn from(v: usize) -> Self { Self(v) }
 }
+impl From<PhysAddr> for usize {
+    fn from(v: PhysAddr) -> Self { v.0 }
+}
+impl From<PhysPageNum> for usize {
+    fn from(v: PhysPageNum) -> Self { v.0 }
+}
 impl From<VirtAddr> for usize {
     fn from(v: VirtAddr) -> Self { v.0 }
 }
 impl From<VirtPageNum> for usize {
     fn from(v: VirtPageNum) -> Self { v.0 }
 }
+
 impl VirtAddr {
     pub fn floor(&self) -> VirtPageNum { VirtPageNum(self.0 / PAGE_SIZE) }
     pub fn ceil(&self) -> VirtPageNum { VirtPageNum((self.0 + PAGE_SIZE - 1) / PAGE_SIZE) }
@@ -67,4 +94,93 @@ impl From<PhysAddr> for PhysPageNum {
 }
 impl From<PhysPageNum> for PhysAddr {
     fn from(v: PhysPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
-}
\ No newline at end of file
+}
+
+impl VirtPageNum {
+    pub fn indexes(&self) -> [usize; 3] {
+        let mut vpn = self.0;
+        let mut idx = [0usize; 3];
+        for i in (0..3).rev() {
+            idx[i] = vpn & 511;
+            vpn >>= 9;
+        }
+        idx
+    }
+}
+
+impl PhysPageNum {
+    pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
+        let pa: PhysAddr = self.clone().into();
+        unsafe {
+            core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512)
+        }
+    }
+    pub fn get_bytes_array(&self) -> &'static mut [u8] {
+        let pa: PhysAddr = self.clone().into();
+        unsafe {
+            core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096)
+        }
+    }
+    pub fn get_mut<T>(&self) -> &'static mut T {
+        let pa: PhysAddr = self.clone().into();
+        unsafe {
+            (pa.0 as *mut T).as_mut().unwrap()
+        }
+    }
+}
+
+pub trait StepByOne {
+    fn step(&mut self);
+}
+impl StepByOne for VirtPageNum {
+    fn step(&mut self) {
+        self.0 += 1;
+    }
+}
+
+#[derive(Copy, Clone)]
+pub struct SimpleRange<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    l: T,
+    r: T,
+}
+impl<T> SimpleRange<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    pub fn new(start: T, end: T) -> Self {
+        assert!(start <= end, "start {:?} > end {:?}!", start, end);
+        Self { l: start, r: end }
+    }
+}
+impl<T> IntoIterator for SimpleRange<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    type Item = T;
+    type IntoIter = SimpleRangeIterator<T>;
+    fn into_iter(self) -> Self::IntoIter {
+        SimpleRangeIterator::new(self.l, self.r)
+    }
+}
+pub struct SimpleRangeIterator<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    current: T,
+    end: T,
+}
+impl<T> SimpleRangeIterator<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    pub fn new(l: T, r: T) -> Self {
+        Self { current: l, end: r, }
+    }
+}
+impl<T> Iterator for SimpleRangeIterator<T> where
+    T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
+    type Item = T;
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.current == self.end {
+            None
+        } else {
+            let t = self.current;
+            self.current.step();
+            Some(t)
+        }
+    }
+}
+pub type VPNRange = SimpleRange<VirtPageNum>;
\ No newline at end of file
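A note on the address.rs hunk above: VirtPageNum::indexes() splits the 27-bit SV39 virtual page number into three 9-bit indices, root-level index first, which find_pte() in the new page_table.rs later walks starting from the root frame. Below is a minimal host-side sketch of the same split; the free function indexes() and the sample VPN are illustrative only and are not part of the diff.

    // Sketch: the three-level index split performed by VirtPageNum::indexes().
    fn indexes(mut vpn: usize) -> [usize; 3] {
        let mut idx = [0usize; 3];
        for i in (0..3).rev() {
            idx[i] = vpn & 511; // take the low 9 bits for this level
            vpn >>= 9;          // move on to the next (higher) level
        }
        idx
    }

    fn main() {
        // 0x12345 splits into [0x0, 0x91, 0x145], from the root level down to the leaf level.
        assert_eq!(indexes(0x12345), [0x0, 0x91, 0x145]);
        println!("index split ok");
    }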
diff --git a/os/src/mm/frame_allocator.rs b/os/src/mm/frame_allocator.rs
index 8fe56193..e467f372 100644
--- a/os/src/mm/frame_allocator.rs
+++ b/os/src/mm/frame_allocator.rs
@@ -4,17 +4,34 @@ use spin::Mutex;
 use crate::config::MEMORY_END;
 use lazy_static::*;
 use core::fmt::{self, Debug, Formatter};
-pub struct FrameTracker(PhysPageNum);
+
+pub struct FrameTracker {
+    pub ppn: PhysPageNum,
+}
+
+impl FrameTracker {
+    pub fn new(ppn: PhysPageNum) -> Self {
+        //println!("into FrameTracker::new, ppn = {:?}", ppn);
+        // page cleaning
+        let bytes_array = ppn.get_bytes_array();
+        //println!("ptr = {:p}, len = {}", bytes_array.as_ptr(), bytes_array.len());
+        for i in bytes_array {
+            *i = 0;
+        }
+        //println!("OK");
+        Self { ppn }
+    }
+}
 
 impl Debug for FrameTracker {
     fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        f.write_fmt(format_args!("FrameTracker:PPN={:#x}", self.0.0))
+        f.write_fmt(format_args!("FrameTracker:PPN={:#x}", self.ppn.0))
     }
 }
 
 impl Drop for FrameTracker {
     fn drop(&mut self) {
-        frame_dealloc(self.0);
+        frame_dealloc(self.ppn);
     }
 }
 
@@ -45,9 +62,12 @@ impl FrameAllocator for StackFrameAllocator {
         }
     }
     fn alloc(&mut self) -> Option<PhysPageNum> {
+        //println!("into StackFrameAllocator::alloc()");
         if let Some(ppn) = self.recycled.pop() {
+            //println!("has recycled!");
             Some(ppn.into())
         } else {
+            //println!("run out recycled, current = {}, end = {}!", self.current, self.end);
             if self.current == self.end {
                 None
             } else {
@@ -87,10 +107,11 @@ pub fn init_frame_allocator() {
 }
 
 pub fn frame_alloc() -> Option<FrameTracker> {
+    //println!("into frame_alloc()");
     FRAME_ALLOCATOR
         .lock()
        .alloc()
-        .map(|ppn| FrameTracker(ppn))
+        .map(|ppn| FrameTracker::new(ppn))
 }
 
 fn frame_dealloc(ppn: PhysPageNum) {
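A note on the frame_allocator.rs hunk above: FrameTracker now owns its physical frame RAII-style. FrameTracker::new() zero-fills the page, and Drop hands the PPN back to the allocator's recycled list. Below is a usage sketch assuming kernel context after mm::init(); the function name frame_tracker_demo is made up for illustration and is not part of the diff.

    // Sketch: allocate a few frames, keep them alive, then recycle them all at once.
    fn frame_tracker_demo() {
        let mut frames = alloc::vec::Vec::new();
        for _ in 0..5 {
            // Each FrameTracker wraps a freshly zero-filled frame.
            let frame = crate::mm::frame_alloc().unwrap();
            println!("{:?}", frame); // Debug impl prints FrameTracker:PPN=...
            frames.push(frame);      // keep the frame alive by holding its tracker
        }
        // Dropping the Vec drops every tracker; each Drop calls frame_dealloc(),
        // so the PPNs return to StackFrameAllocator's recycled stack.
        drop(frames);
    }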
f.write_fmt(format_args!("FrameTracker:PPN={:#x}", self.ppn.0)) } } impl Drop for FrameTracker { fn drop(&mut self) { - frame_dealloc(self.0); + frame_dealloc(self.ppn); } } @@ -45,9 +62,12 @@ impl FrameAllocator for StackFrameAllocator { } } fn alloc(&mut self) -> Option { + //println!("into StackFrameAllocator::alloc()"); if let Some(ppn) = self.recycled.pop() { + //println!("has recycled!"); Some(ppn.into()) } else { + //println!("run out recycled, current = {}, end = {}!", self.current, self.end); if self.current == self.end { None } else { @@ -87,10 +107,11 @@ pub fn init_frame_allocator() { } pub fn frame_alloc() -> Option { + //println!("into frame_alloc()"); FRAME_ALLOCATOR .lock() .alloc() - .map(|ppn| FrameTracker(ppn)) + .map(|ppn| FrameTracker::new(ppn)) } fn frame_dealloc(ppn: PhysPageNum) { diff --git a/os/src/mm/memory_set.rs b/os/src/mm/memory_set.rs new file mode 100644 index 00000000..e9ec1725 --- /dev/null +++ b/os/src/mm/memory_set.rs @@ -0,0 +1,199 @@ +use super::{ + PageTable, + PTEFlags, + VirtAddr, + VirtPageNum, + PhysAddr, + PhysPageNum, + FrameTracker, + VPNRange, + frame_alloc, +}; +use core::ops::Range; +use alloc::collections::BTreeMap; +use alloc::vec::Vec; +use riscv::register::satp; +use alloc::sync::Arc; +use lazy_static::*; +use spin::Mutex; +use crate::config::MEMORY_END; + +extern "C" { + fn stext(); + fn etext(); + fn srodata(); + fn erodata(); + fn sdata(); + fn edata(); + fn sbss_with_stack(); + fn ebss(); + fn ekernel(); +} + +lazy_static! { + pub static ref KERNEL_SPACE: Arc> = Arc::new(Mutex::new( + MemorySet::new_kernel() + )); +} + +pub struct MemorySet { + page_table: PageTable, + areas: Vec, +} + +impl MemorySet { + pub fn new_bare() -> Self { + Self { + page_table: PageTable::new(), + areas: Vec::new(), + } + } + fn push(&mut self, mut map_area: MapArea) { + map_area.map(&mut self.page_table); + self.areas.push(map_area); + } + pub fn new_kernel() -> Self { + let mut memory_set = Self::new_bare(); + println!(".text [{:#x}, {:#x})", stext as usize, etext as usize); + println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize); + println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize); + println!(".bss [{:#x}, {:#x})", sbss_with_stack as usize, ebss as usize); + println!("mapping .text section"); + memory_set.push(MapArea::new( + (stext as usize).into(), + (etext as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::X, + )); + println!("mapping .rodata section"); + memory_set.push(MapArea::new( + (srodata as usize).into(), + (erodata as usize).into(), + MapType::Identical, + MapPermission::R, + )); + println!("mapping .data section"); + memory_set.push(MapArea::new( + (sdata as usize).into(), + (edata as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + )); + println!("mapping .bss section"); + memory_set.push(MapArea::new( + (sbss_with_stack as usize).into(), + (ebss as usize).into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + )); + println!("mapping physical memory"); + memory_set.push(MapArea::new( + (ekernel as usize).into(), + MEMORY_END.into(), + MapType::Identical, + MapPermission::R | MapPermission::W, + )); + memory_set + } + pub fn activate(&self) { + let satp = self.page_table.token(); + unsafe { + satp::write(satp); + llvm_asm!("sfence.vma" :::: "volatile"); + } + } +} + +pub struct MapArea { + vpn_range: VPNRange, + data_frames: BTreeMap, + map_type: MapType, + map_perm: MapPermission, +} + +impl MapArea { + pub fn new( + start_va: VirtAddr, + 
diff --git a/os/src/mm/mod.rs b/os/src/mm/mod.rs
index 17890ee2..122bb08e 100644
--- a/os/src/mm/mod.rs
+++ b/os/src/mm/mod.rs
@@ -1,11 +1,19 @@
 mod heap_allocator;
 mod address;
 mod frame_allocator;
+mod page_table;
+mod memory_set;
 
+use page_table::{PageTable, PTEFlags};
+use address::VPNRange;
 pub use address::{PhysAddr, VirtAddr, PhysPageNum, VirtPageNum};
 pub use frame_allocator::{FrameTracker, frame_alloc};
+pub use page_table::{PageTableEntry};
+pub use memory_set::{MemorySet, KERNEL_SPACE};
+pub use memory_set::remap_test;
 
 pub fn init() {
     heap_allocator::init_heap();
     frame_allocator::init_frame_allocator();
+    KERNEL_SPACE.clone().lock().activate();
 }
diff --git a/os/src/mm/page_table.rs b/os/src/mm/page_table.rs
new file mode 100644
index 00000000..c59ef44c
--- /dev/null
+++ b/os/src/mm/page_table.rs
@@ -0,0 +1,111 @@
+use super::{frame_alloc, PhysPageNum, FrameTracker, VirtPageNum};
+use alloc::vec::Vec;
+use alloc::vec;
+use bitflags::*;
+
+bitflags! {
+    pub struct PTEFlags: u8 {
+        const V = 1 << 0;
+        const R = 1 << 1;
+        const W = 1 << 2;
+        const X = 1 << 3;
+        const U = 1 << 4;
+        const G = 1 << 5;
+        const A = 1 << 6;
+        const D = 1 << 7;
+    }
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct PageTableEntry {
+    pub bits: usize,
+}
+
+impl PageTableEntry {
+    pub fn new(ppn: PhysPageNum, flags: PTEFlags) -> Self {
+        PageTableEntry {
+            bits: ppn.0 << 10 | flags.bits as usize,
+        }
+    }
+    pub fn empty() -> Self {
+        PageTableEntry {
+            bits: 0,
+        }
+    }
+    pub fn ppn(&self) -> PhysPageNum {
+        (self.bits >> 10 & ((1usize << 44) - 1)).into()
+    }
+    pub fn flags(&self) -> PTEFlags {
+        PTEFlags::from_bits(self.bits as u8).unwrap()
+    }
+    pub fn is_valid(&self) -> bool {
+        (self.flags() & PTEFlags::V) != PTEFlags::empty()
+    }
+    pub fn readable(&self) -> bool {
+        (self.flags() & PTEFlags::R) != PTEFlags::empty()
+    }
+    pub fn writable(&self) -> bool {
+        (self.flags() & PTEFlags::W) != PTEFlags::empty()
+    }
+    pub fn executable(&self) -> bool {
+        (self.flags() & PTEFlags::X) != PTEFlags::empty()
+    }
+}
+
+pub struct PageTable {
+    root_ppn: PhysPageNum,
+    frames: Vec<FrameTracker>,
+}
+
+/// Assume that it won't oom when creating/mapping.
+impl PageTable {
+    pub fn new() -> Self {
+        //println!("into PageTable::new()");
+        let frame = frame_alloc().unwrap();
+        PageTable {
+            root_ppn: frame.ppn,
+            frames: vec![frame],
+        }
+    }
+    fn find_pte(&mut self, vpn: VirtPageNum, create: bool) -> Option<&mut PageTableEntry> {
+        let idxs = vpn.indexes();
+        let mut ppn = self.root_ppn;
+        let mut result: Option<&mut PageTableEntry> = None;
+        for i in 0..3 {
+            let pte = &mut ppn.get_pte_array()[idxs[i]];
+            if i == 2 {
+                result = Some(pte);
+                break;
+            }
+            if !pte.is_valid() {
+                if !create {
+                    return None;
+                }
+                let frame = frame_alloc().unwrap();
+                *pte = PageTableEntry::new(frame.ppn, PTEFlags::V);
+                self.frames.push(frame);
+            }
+            ppn = pte.ppn();
+        }
+        result
+    }
+    pub fn map(&mut self, vpn: VirtPageNum, ppn: PhysPageNum, flags: PTEFlags) {
+        //println!("mapping {:?} {:?}", vpn, ppn);
+        let pte = self.find_pte(vpn, true).unwrap();
+        assert!(!pte.is_valid(), "vpn {:?} is mapped before mapping", vpn);
+        *pte = PageTableEntry::new(ppn, flags | PTEFlags::V);
+    }
+    pub fn unmap(&mut self, vpn: VirtPageNum) {
+        let pte = self.find_pte(vpn, false).unwrap();
+        assert!(pte.is_valid(), "vpn {:?} is invalid before unmapping", vpn);
+        *pte = PageTableEntry::empty();
+    }
+    pub fn translate(&mut self, vpn: VirtPageNum) -> Option<PageTableEntry> {
+        self.find_pte(vpn, false)
+            .map(|pte| {pte.clone()})
+    }
+    pub fn token(&self) -> usize {
+        8usize << 60 | self.root_ppn.0
+    }
+}
\ No newline at end of file
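A note on page_table.rs above: PageTableEntry packs the PPN into bits 10..=53 and the flag byte into bits 0..=7, and PageTable::token() builds an SV39 satp value by writing MODE = 8 into the top four bits above the root PPN. The sketch below checks both packings on a 64-bit host; the sample PPN values are arbitrary and it is not part of the diff.

    // Sketch: the bit packing used by PageTableEntry::new()/ppn()/flags() and token().
    fn main() {
        let ppn: usize = 0x8_0123;
        let flags: u8 = 0b1111; // V | R | W | X
        let pte = ppn << 10 | flags as usize;
        assert_eq!(pte >> 10 & ((1usize << 44) - 1), ppn); // ppn() recovers the PPN
        assert_eq!(pte as u8, flags);                      // flags() recovers the flag byte

        // satp: MODE = 8 (SV39) in bits 60..=63, root page-table PPN in the low 44 bits.
        let root_ppn: usize = 0x8_0000;
        let satp = 8usize << 60 | root_ppn;
        assert_eq!(satp >> 60, 8);
        assert_eq!(satp & ((1usize << 44) - 1), root_ppn);
        println!("pte = {:#x}, satp = {:#x}", pte, satp);
    }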