introduce MemoryHandler and refactor MemorySet. temporarily disable swap.

This is a manual rebase of LCY's code
master
WangRunji 6 years ago
parent 96a76290b6
commit 102866bcf9

@ -0,0 +1,34 @@
use super::*;
#[derive(Debug, Clone)]
pub struct ByFrame<T: FrameAllocator> {
flags: MemoryAttr,
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = self.allocator.alloc().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = pt.get_entry(addr).expect("fail to get entry").target();
self.allocator.dealloc(target);
pt.unmap(addr);
}
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl<T: FrameAllocator> ByFrame<T> {
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
ByFrame { flags, allocator }
}
}
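For orientation: `ByFrame` backs every page of an area with a freshly allocated frame at map time. A hedged sketch of its use, mirroring the user-stack setup later in this commit (`ms`, `ustack_bottom` and `ustack_top` are assumed to be in scope):

// Sketch: eagerly allocate and map frames for a user stack.
// GlobalFrameAlloc is the kernel-side FrameAllocator added by this commit.
ms.push(ustack_bottom, ustack_top,
        ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc),
        "user_stack");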

@ -0,0 +1,46 @@
use super::*;
#[derive(Debug, Clone)]
pub struct Delay<T: FrameAllocator> {
flags: MemoryAttr,
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.map(addr, 0);
entry.set_present(false);
entry.update();
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
self.allocator.dealloc(entry.target());
}
pt.unmap(addr);
}
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// not a delay case
return false;
}
let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame);
entry.set_present(true);
entry.update();
true
}
}
impl<T: FrameAllocator> Delay<T> {
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
Delay { flags, allocator }
}
}
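`Delay` maps each page as non-present, so the first access faults into `page_fault_handler`, which allocates the frame on demand. A hedged sketch of that flow (`heap_bottom`/`heap_top` are illustrative names):

// Sketch: lazily-backed area; no frames are consumed until first access.
ms.push(heap_bottom, heap_top,
        Delay::new(MemoryAttr::default().user(), GlobalFrameAlloc),
        "user_heap");
// The first touch of a page in [heap_bottom, heap_top) traps; the trap
// handler calls ms.page_fault_handler(addr), which allocates a frame,
// marks the entry present, and returns true so execution can resume.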

@ -0,0 +1,32 @@
use super::*;
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Linear {
offset: isize,
flags: MemoryAttr,
}
impl MemoryHandler for Linear {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = (addr as isize + self.offset) as PhysAddr;
self.flags.apply(pt.map(addr, target));
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
pt.unmap(addr);
}
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl Linear {
pub fn new(offset: isize, flags: MemoryAttr) -> Self {
Linear { offset, flags }
}
}
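`Linear` maps each virtual page to `vaddr + offset`, so `offset == 0` gives an identity mapping; that is exactly how the kernel sections and the MMIO window are remapped later in this commit:

// Sketch: identity-map an MMIO window (offset 0), as in remap_the_kernel().
ms.push(IO_REMAP_BASE, IO_REMAP_END,
        Linear::new(0, MemoryAttr::default().mmio()),
        "io_remap");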

@ -0,0 +1,29 @@
use super::*;
// here may be an interesting part for lab
pub trait MemoryHandler: Debug + 'static {
fn box_clone(&self) -> Box<MemoryHandler>;
fn map(&self, pt: &mut PageTable, addr: VirtAddr);
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
}
impl Clone for Box<MemoryHandler> {
fn clone(&self) -> Box<MemoryHandler> {
self.box_clone()
}
}
pub trait FrameAllocator: Debug + Clone + 'static {
fn alloc(&self) -> Option<PhysAddr>;
fn dealloc(&self, target: PhysAddr);
}
mod linear;
mod byframe;
mod delay;
//mod swap;
pub use self::linear::Linear;
pub use self::byframe::ByFrame;
pub use self::delay::Delay;
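The `box_clone` indirection exists because `Clone` is not object-safe (its `clone` returns `Self`), so `Box<MemoryHandler>` cannot require it directly; each concrete handler instead forwards to its own derived `Clone`. A minimal sketch of a custom handler under that contract (`NullHandler` is purely illustrative):

// Sketch: the smallest possible MemoryHandler implementation.
#[derive(Debug, Clone)]
struct NullHandler;
impl MemoryHandler for NullHandler {
    fn box_clone(&self) -> Box<MemoryHandler> { Box::new(self.clone()) }
    fn map(&self, _pt: &mut PageTable, _addr: VirtAddr) {}
    fn unmap(&self, _pt: &mut PageTable, _addr: VirtAddr) {}
    fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
        false // this handler never resolves a fault
    }
}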

@ -1,10 +1,13 @@
//! memory set, area
//! and the inactive page table
use alloc::vec::Vec;
use alloc::{vec::Vec, boxed::Box};
use core::fmt::{Debug, Error, Formatter};
use super::*;
use crate::paging::*;
use self::handler::MemoryHandler;
pub mod handler;
/// an inactive page table
/// Note: InactivePageTable is not a PageTable
@ -31,7 +34,7 @@ pub trait InactivePageTable {
** which takes a temporarily activated page table as param
** @retval none
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active));
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/*
** @brief activate the inactive page table
** @retval none
@ -48,88 +51,32 @@ pub trait InactivePageTable {
** @retval usize the token of the inactive page table
*/
fn token(&self) -> usize;
/// Why are the methods below in this trait?
/*
** @brief allocate a frame for use
** @retval Option<PhysAddr> the physical address of the beginning of the allocated frame, if present
*/
fn alloc_frame() -> Option<PhysAddr>;
/*
** @brief deallocate a frame for use
** @param PhysAddr the physical address of the beginning of the frame to be deallocated
** @retval none
*/
fn dealloc_frame(target: PhysAddr);
}
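Making `edit` generic over the closure's return type lets a caller pass a value out of the temporarily activated table instead of smuggling it through captured state. A hedged sketch (`page_table` and `addr` assumed in scope):

// Sketch: read an entry's target while the inactive table is mapped in.
let target = page_table.edit(|pt| {
    pt.get_entry(addr).expect("failed to get entry").target()
});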
/// a continuous memory space with the same attributes
/// like `vma_struct` in ucore
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[derive(Debug, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
phys_start_addr: Option<PhysAddr>,
flags: MemoryAttr,
handler: Box<MemoryHandler>,
name: &'static str,
}
impl MemoryArea {
/*
** @brief create a memory area from virtual address
** @param start_addr: VirtAddr the virtual address of the beginning of the area
** @param end_addr: VirtAddr the virtual address of the end of the area
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: None, flags, name }
}
/*
** @brief create a memory area from virtual address which is identically mapped
** @param start_addr: VirtAddr the virtual address of the beginning of the area
** @param end_addr: VirtAddr the virtual address of the end of the area
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: Some(start_addr), flags, name }
}
/*
** @brief create a memory area from a physical address
** @param start_addr: PhysAddr the physical address of the beginning of the area
** @param end_addr: PhysAddr the physical address of the end of the area
** @param offset: usize the offset between the physical address and the virtual address
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new_physical(phys_start_addr: PhysAddr, phys_end_addr: PhysAddr, offset: usize, flags: MemoryAttr, name: &'static str) -> Self {
let start_addr = phys_start_addr + offset;
let end_addr = phys_end_addr + offset;
assert!(start_addr <= end_addr, "invalid memory area");
let phys_start_addr = Some(phys_start_addr);
MemoryArea { start_addr, end_addr, phys_start_addr, flags, name }
}
/*
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
use core::slice;
slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
::core::slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
use core::slice;
slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
::core::slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/*
** @brief test whether a virtual address is in the memory area
@ -156,64 +103,21 @@ impl MemoryArea {
** @param pt: &mut PageTable the page table to use
** @retval none
*/
fn map<T: InactivePageTable>(&self, pt: &mut T::Active) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = page.start_address() - self.start_addr + phys_start;
self.flags.apply(pt.map(addr, target));
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = T::alloc_frame().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
// for frame delayed allocation
// let entry = pt.map(addr,0);
// self.flags.apply(entry);
// let entry = pt.get_entry(addr).expect("fail to get entry");
// entry.set_present(false);
// entry.update();
}
}
};
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.map(pt, page.start_address());
}
}
/*
** @brief unmap the memory area from the physical address in a page table
** @param pt: &mut PageTable the page table to use
** @retval none
*/
fn unmap<T: InactivePageTable>(&self, pt: &mut T::Active) {
fn unmap(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
if self.phys_start_addr.is_none() {
if pt.get_entry(addr).expect("fail to get entry").present(){
let target = pt.get_entry(addr).expect("fail to get entry").target();
T::dealloc_frame(target);
}
else{
// set valid for pt.unmap function
pt.get_entry(addr).expect("fail to get entry").set_present(true);
}
}
pt.unmap(addr);
self.handler.unmap(pt, page.start_address());
}
}
pub fn get_start_addr(&self) -> VirtAddr {
self.start_addr
}
pub fn get_end_addr(&self) -> VirtAddr{
self.end_addr
}
pub fn get_flags(&self) -> &MemoryAttr{
&self.flags
}
}
/// The attributes of the memory
@ -269,7 +173,7 @@ impl MemoryAttr {
** the page table entry to apply the attribute
** @retval none
*/
fn apply(&self, entry: &mut impl Entry) {
fn apply(&self, entry: &mut Entry) {
if self.user { entry.set_user(true); }
if self.readonly { entry.set_writable(false); }
if self.execute { entry.set_execute(true); }
@ -293,13 +197,13 @@ impl<T: InactivePageTable> MemorySet<T> {
*/
pub fn new() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
areas: Vec::new(),
page_table: T::new(),
}
}
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
areas: Vec::new(),
page_table: T::new_bare(),
}
}
@ -316,11 +220,13 @@ impl<T: InactivePageTable> MemorySet<T> {
** @param start_addr, end_addr, handler, name: the bounds, handler and name of the memory area to add
** @retval none
*/
pub fn push(&mut self, area: MemoryArea) {
pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, handler: impl MemoryHandler, name: &'static str) {
assert!(start_addr <= end_addr, "invalid memory area");
let area = MemoryArea { start_addr, end_addr, handler: Box::new(handler), name };
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
self.page_table.edit(|pt| area.map::<T>(pt));
self.page_table.edit(|pt| area.map(pt));
self.areas.push(area);
}
/*
@ -364,7 +270,7 @@ impl<T: InactivePageTable> MemorySet<T> {
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap::<T>(pt);
area.unmap(pt);
}
});
areas.clear();
@ -378,6 +284,13 @@ impl<T: InactivePageTable> MemorySet<T> {
&mut self.page_table
}
pub fn page_fault_handler(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr));
match area {
Some(area) => self.page_table.edit(|pt| area.handler.page_fault_handler(pt, addr)),
None => false,
}
}
}
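`page_fault_handler` dispatches the fault to whichever area contains the address; `false` means no area claims it. A hedged sketch of the arch-side wiring, mirroring `crate::memory::page_fault_handler` further down in this commit:

// Sketch: in the architecture's page-fault trap, delegate to the current
// process's MemorySet; true = fault resolved, resume execution.
fn handle_page_fault(addr: VirtAddr) -> bool {
    process().memory_set.page_fault_handler(addr)
}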
impl<T: InactivePageTable> Clone for MemorySet<T> {
@ -385,10 +298,9 @@ impl<T: InactivePageTable> Clone for MemorySet<T> {
let mut page_table = T::new();
page_table.edit(|pt| {
for area in self.areas.iter() {
area.map::<T>(pt);
area.map(pt);
}
});
info!("finish map in clone!");
MemorySet {
areas: self.areas.clone(),
page_table,
@ -398,7 +310,6 @@ impl<T: InactivePageTable> Clone for MemorySet<T> {
impl<T: InactivePageTable> Drop for MemorySet<T> {
fn drop(&mut self) {
info!("come into drop func for memoryset");
self.clear();
}
}

@ -12,14 +12,14 @@ mod mock_page_table;
// trait for PageTable
pub trait PageTable {
type Entry: Entry;
// type Entry: Entry;
/*
** @brief map a virtual address to the target physical address
** @param addr: VirtAddr the virtual address to map
** @param target: PhysAddr the target physical address
** @retval Entry the page table entry of the mapped virtual address
*/
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry;
/*
** @brief unmap a virtual address from its physical address
** @param addr: VirtAddr the virtual address to unmap
@ -31,7 +31,7 @@ pub trait PageTable {
** @param addr: VirtAddr the virtual address
** @retval Entry the page table entry of the virtual address
*/
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry>;
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
// For testing with mock
/*
** @brief used for testing with mock

@ -5,7 +5,7 @@ use ucore_memory::PAGE_SIZE;
use atags::atags::Atags;
use aarch64::{barrier, regs::*, addr::*};
use aarch64::paging::{PhysFrame as Frame, memory_attribute::*};
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet};
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Linear};
/// Memory initialization.
pub fn init() {
@ -100,14 +100,14 @@ fn init_frame_allocator() {
/// remap kernel page table after all initialization.
fn remap_the_kernel() {
let mut ms = unsafe { MemorySet::new_bare() };
ms.push(MemoryArea::new_identity(0, bootstacktop as usize, MemoryAttr::default(), "kstack"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
ms.push(0, bootstacktop as usize, Linear::new(0, MemoryAttr::default()), "kstack");
ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(MemoryArea::new_identity(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(), "io_remap"));
ms.push(IO_REMAP_BASE, IO_REMAP_END, Linear::new(0, MemoryAttr::default().mmio()), "io_remap");
unsafe { ms.get_page_table_mut().activate_as_kernel(); }
::core::mem::forget(ms);

@ -49,9 +49,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
@ -64,7 +62,7 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
}
@ -90,7 +88,7 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(ROOT_PAGE_TABLE as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
@ -98,9 +96,10 @@ impl ActivePageTable {
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
@ -196,7 +195,7 @@ impl InactivePageTable for InactivePageTable0 {
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
let frame = alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
table.zero();
@ -206,7 +205,7 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
@ -215,12 +214,13 @@ impl InactivePageTable for InactivePageTable0 {
tlb_invalidate_all();
// execute f in the new context
f(active_table);
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
tlb_invalidate_all();
});
ret
})
}
unsafe fn activate(&self) {
@ -255,14 +255,6 @@ impl InactivePageTable for InactivePageTable0 {
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
impl InactivePageTable0 {
@ -291,7 +283,7 @@ impl InactivePageTable0 {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -2,7 +2,7 @@ use core::{slice, mem};
use riscv::{addr::*, register::sstatus};
use ucore_memory::PAGE_SIZE;
use log::*;
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR};
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR, Linear};
use crate::consts::{MEMORY_OFFSET, MEMORY_END};
#[cfg(feature = "no_mmu")]
@ -78,11 +78,11 @@ fn init_frame_allocator() {
fn remap_the_kernel() {
let mut ms = MemorySet::new_bare();
#[cfg(feature = "no_bbl")]
ms.push(MemoryArea::new_identity(0x10000000, 0x10000008, MemoryAttr::default(), "serial"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
ms.push(0x10000000, 0x10000008, Linear::new(0, MemoryAttr::default()), "serial");
ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);

@ -42,8 +42,6 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
/*
* @param:
* addr: the virtual addr to be matched
@ -53,7 +51,7 @@ impl PageTable for ActivePageTable {
* @retval:
* the matched PageEntry
*/
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// the flag for the new page entry
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
// here `page` is the virtual address while `frame` is the physical one; both are 4096-byte aligned
@ -86,7 +84,7 @@ impl PageTable for ActivePageTable {
* @retval:
* a mutable PageEntry reference of 'addr'
*/
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
if unsafe { !(*ROOT_PAGE_TABLE)[addr >> 22].flags().contains(EF::VALID) } {
return None;
}
@ -149,7 +147,7 @@ impl ActivePageTable {
* @brief:
* temporarily map the target physical frame and run `f` on it
*/
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
@ -157,9 +155,10 @@ impl ActivePageTable {
self.map(page.start_address().as_usize(), frame.start_address().as_u32() as usize);
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
f(self, table);
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
@ -233,7 +232,7 @@ impl InactivePageTable for InactivePageTable0 {
* the inactive page table
*/
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
let frame = alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
table.zero();
@ -248,7 +247,7 @@ impl InactivePageTable for InactivePageTable0 {
* @brief:
* temporarily map the inactive page table as an active p2 page and apply `f` to the temporarily modified active page table
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
@ -257,12 +256,14 @@ impl InactivePageTable for InactivePageTable0 {
sfence_vma_all();
// execute f in the new context
f(active_table);
let ret = f(active_table);
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
});
ret
})
}
/*
@ -313,14 +314,6 @@ impl InactivePageTable for InactivePageTable0 {
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
impl InactivePageTable0 {
@ -332,17 +325,15 @@ impl InactivePageTable0 {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
assert!(!e1.is_unused());
// for larger heap memory
let e2 = table[KERNEL_P2_INDEX + 1];
assert!(!e2.is_unused());
let e3 = table[KERNEL_P2_INDEX + 2];
assert!(!e1.is_unused());
assert!(!e2.is_unused());
assert!(!e3.is_unused());
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
// for larger heap memory
table[KERNEL_P2_INDEX + 1].set(e2.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
@ -352,7 +343,7 @@ impl InactivePageTable0 {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
}
}

@ -1,4 +1,3 @@
use bit_allocator::{BitAlloc, BitAlloc64K};
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use spin::{Mutex, MutexGuard};
@ -44,9 +43,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
.unwrap().flush();
@ -58,7 +55,7 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } { return None; }
@ -84,7 +81,7 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
@ -92,9 +89,10 @@ impl ActivePageTable {
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
@ -173,7 +171,7 @@ impl InactivePageTable for InactivePageTable0 {
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
let frame = alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut x86PageTable| {
table.zero();
@ -183,7 +181,7 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
@ -192,12 +190,13 @@ impl InactivePageTable for InactivePageTable0 {
tlb::flush_all();
// execute f in the new context
f(active_table);
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
});
ret
})
}
unsafe fn activate(&self) {
@ -227,14 +226,6 @@ impl InactivePageTable for InactivePageTable0 {
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
impl InactivePageTable0 {
@ -254,7 +245,7 @@ impl InactivePageTable0 {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -4,7 +4,7 @@ use crate::consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, InactivePageTable};
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, InactivePageTable, handler::*};
use ucore_memory::swap::*;
use crate::process::{process};
use crate::sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
@ -46,33 +46,29 @@ pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>, SpinNoIrq>
ACTIVE_TABLE.lock()
}
// Page table for swap in and out
lazy_static!{
static ref ACTIVE_TABLE_SWAP: SpinNoIrqLock<SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>> =
SpinNoIrqLock::new(unsafe{SwapExt::new(ActivePageTable::new(), fifo::FifoSwapManager::default(), mock_swapper::MockSwapper::default())});
}
pub fn active_table_swap() -> MutexGuard<'static, SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>, SpinNoIrq>{
ACTIVE_TABLE_SWAP.lock()
#[derive(Debug, Clone, Copy)]
pub struct GlobalFrameAlloc;
impl FrameAllocator for GlobalFrameAlloc {
fn alloc(&self) -> Option<usize> {
// compute the real physical address of the allocated frame
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
ret
// TODO: try to swap out when alloc failed
}
fn dealloc(&self, target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
}
}
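`GlobalFrameAlloc` is a zero-sized `Copy` handle, so every `ByFrame`/`Delay` handler can carry its own copy while all of them share the single locked FRAME_ALLOCATOR. A hedged sketch:

// Sketch: the global allocator handle can be duplicated freely.
let alloc = GlobalFrameAlloc;
let frame = alloc.alloc().expect("out of memory"); // physical address
alloc.dealloc(frame);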
/*
* @brief:
* allocate a free physical frame; if there is no free frame, swap out one page and return the mapped frame as the free one
* @retval:
* the physical address for the allocated frame
*/
pub fn alloc_frame() -> Option<usize> {
// get the real address of the alloc frame
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
//do we need : unsafe { ACTIVE_TABLE_SWAP.force_unlock(); } ???
Some(ret.unwrap_or_else(|| active_table_swap().swap_out_any::<InactivePageTable0>().ok().expect("fail to swap out page")))
GlobalFrameAlloc.alloc()
}
pub fn dealloc_frame(target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
GlobalFrameAlloc.dealloc(target);
}
pub struct KernelStack(usize);
@ -97,44 +93,12 @@ impl Drop for KernelStack {
}
/*
* @param:
* addr: the virtual address of the page fault
* @brief:
* handle page fault
* @retval:
* Return true to continue, false to halt
*/
/// Handle page fault at `addr`.
/// Return true to continue, false to halt.
#[cfg(not(feature = "no_mmu"))]
pub fn page_fault_handler(addr: usize) -> bool {
info!("start handling swap in/out page fault");
//unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
/*LAB3 EXERCISE 1: YOUR STUDENT NUMBER
* handle the frame deallocated
*/
info!("get pt from processor()");
if process().memory_set.find_area(addr).is_none(){
return false;
}
let pt = process().memory_set.get_page_table_mut();
info!("pt got");
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, true, || alloc_frame().expect("fail to alloc frame")){
return true;
}
//////////////////////////////////////////////////////////////////////////////
// Handle copy on write (not being used now)
/*
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().expect("fail to alloc frame")){
return true;
}
*/
false
process().memory_set.page_fault_handler(addr)
}
pub fn init_heap() {

@ -1,5 +1,5 @@
use crate::arch::interrupt::{TrapFrame, Context as ArchContext};
use crate::memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame, InactivePageTable0};
use crate::memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, InactivePageTable0, GlobalFrameAlloc, FrameAllocator};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type, SegmentData}};
use core::fmt::{Debug, Error, Formatter};
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec, sync::Arc, string::String};
@ -82,7 +82,7 @@ impl ContextImpl {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
};
memory_set.push(MemoryArea::new(ustack_buttom, ustack_top, MemoryAttr::default().user(), "user_stack"));
memory_set.push(ustack_buttom, ustack_top, handler::ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
ustack_top
};
#[cfg(feature = "no_mmu")]
@ -96,9 +96,6 @@ impl ContextImpl {
let kstack = KernelStack::new();
//set the user Memory pages in the memory set swappable
memory_set_map_swappable(&mut memory_set);
Box::new(ContextImpl {
arch: unsafe {
ArchContext::new_user_thread(
@ -131,9 +128,6 @@ impl ContextImpl {
info!("temporary copy data!");
let kstack = KernelStack::new();
memory_set_map_swappable(&mut memory_set);
info!("FORK() finsihed!");
Box::new(ContextImpl {
arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
memory_set,
@ -144,28 +138,6 @@ impl ContextImpl {
}
}
#[cfg(not(feature = "no_mmu"))]
#[cfg(not(target_arch = "aarch64"))]
impl Drop for ContextImpl {
fn drop(&mut self){
info!("come in to drop for ContextImpl");
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set, ref mut kstack, ..} = self;
let pt = {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe {
active_table_swap().remove_from_swappable(pt, addr, || alloc_frame().expect("alloc frame failed"));
}
}
}
debug!("Finishing setting pages unswappable");
}
}
impl Debug for ContextImpl {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:x?}", self.arch)
@ -222,7 +194,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> (MemorySet, usize) {
let target = ms.push(mem_size);
#[cfg(not(feature = "no_mmu"))]
let target = {
ms.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(ph.flags()), ""));
ms.push(virt_addr, virt_addr + mem_size, handler::ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), "");
unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
};
// Copy data
@ -249,30 +221,3 @@ fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}
/*
* @param:
* memory_set: the target MemorySet to set swappable
* @brief:
* map the memory area in the memory_set swappalbe, specially for the user process
*/
#[cfg(not(any(feature = "no_mmu", target_arch = "aarch64")))]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
info!("COME INTO memory set map swappable!");
let pt = unsafe {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe { active_table_swap().set_swappable(pt, addr); }
}
}
info!("Finishing setting pages swappable");
}
#[cfg(any(feature = "no_mmu", target_arch = "aarch64"))]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
// FIXME: Page Fault on aarch64
// NOTE: This function may disappear after refactor memory crate
}