comments of crate/memory

master
Ben Pig Chu 6 years ago
parent 528c919626
commit 4a17ce8f16

@ -10,12 +10,28 @@ pub struct Page {
}
impl Page {
/// Returns the virtual address of the first byte of this page.
pub fn start_address(&self) -> VirtAddr {
    PAGE_SIZE * self.number
}
/// Returns the page that contains the given virtual address.
pub fn of_addr(addr: VirtAddr) -> Self {
    let number = addr / PAGE_SIZE;
    Page { number }
}
/*
** @brief get a pageRange between two virtual address
** @param begin: VirtAddr the virtual address of the beginning
** @param end: VirtAddr the virtual address of the end
** @retval PageRange the page of the given virtual address
*/
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
PageRange {
start: Page::of_addr(begin),

@ -1,22 +1,24 @@
//! Shared memory & Copy-on-write extension for page table
//!
//! 实现共享内存和写时复制机制。
//! To use the CowExt, make a wrapper over the original page table
//! Like: CowExt::new(origin_page_table)
//! Invoke page_fault_handler() on the CowExt to run the COW process
//! If the method above returns true, the COW process is executed, else do your own things.
//!
//! ## 使用说明
//! To implement the CowExt, we added a "shared state" to the page table entry
//! We use 2bits in the entry for "readonly and shared" and "writable and shared"
//! For CPU, the page of the entry is present and readonly,
//! and it's possible to read the page through different page tables
//! but when the page is written, a page fault will be triggered.
//! When page fault is triggered, the page_fault_handler() on the CowExt should be invoked.
//! The page_fault_handler() method returns false if the page is genuinely readonly.
//! Otherwise we copy the data of the page into a newly allocated frame,
//! modify the page table entry to map the page to that frame, and set the present and writable bits.
//!
//! 在原页表的基础上套一层CowExt::new(origin_page_table)
//! 在PageFault时调用`page_fault_handler()`如返回true说明发生了COW否则再进行其他处理。
//!
//! ## 实现概述
//!
//! 我们为页表项定义一个新的状态:共享态。
//! 使用页表项中2bit分别表示只读共享可写共享。
//! 在这一状态下对于CPU而言它是存在+只读的,可以通过不同的页表对该页进行读操作。
//! 当进行写操作时会触发PageFault。如果此页实际是只读的则正常抛出异常。
//! 否则如果实际是可写的,此时再新分配一个物理页,复制数据,将页表项指向该页,并置为存在+可写。
//!
//! 对于同一个物理页,允许同时存在读引用和写引用,为此我们需要维护二者的引用计数。
//! 当PageFault时如果读引用为0写引用为1则直接标记可写。
//! A frame can have write and read reference at the same time,
//! so we need to maintain the count of write and read reference.
//! When page fault occurs, if the read reference count is 0 and the write reference count is 1
//! the copy process is skipped and the entry is marked writable directly.
use super::paging::*;
use super::*;
@ -30,12 +32,25 @@ pub struct CowExt<T: PageTable> {
}
impl<T: PageTable> CowExt<T> {
/// Wrap the given page table with a copy-on-write extension,
/// starting with an empty frame reference-count map.
pub fn new(page_table: T) -> Self {
    CowExt {
        rc_map: FrameRcMap::default(),
        page_table,
    }
}
/*
** @brief map the virtual address to a target physics address as shared
** @param addr: VirtAddr the virtual address to map
** @param target: VirtAddr the target physics address
** @param writable: bool if it is true, set the page as writable and shared
** else set the page as readonly and shared
** @retval none
*/
pub fn map_to_shared(&mut self, addr: VirtAddr, target: PhysAddr, writable: bool) {
let entry = self.page_table.map(addr, target);
entry.set_writable(false);
@ -47,6 +62,12 @@ impl<T: PageTable> CowExt<T> {
false => self.rc_map.read_increase(&frame),
}
}
/*
** @brief unmap a virtual address from a physical address,
** with special additional processing for shared pages
** @param addr: VirtAddr the virual address to unmap
** @retval none
*/
pub fn unmap_shared(&mut self, addr: VirtAddr) {
{
let entry = self.page_table.get_entry(addr);
@ -59,8 +80,16 @@ impl<T: PageTable> CowExt<T> {
}
self.page_table.unmap(addr);
}
/// This function must be called whenever PageFault happens.
/// Return whether copy-on-write happens.
/*
** @brief execute the COW process for page fault
** This function must be called whenever PageFault happens.
** @param addr: VirtAddr the virtual address of the page fault
** @param alloc_frame: impl FnOnce() -> PhysAddr
** the page allocation function
** that allocate a page and returns physics address
** of beginning of the page
** @retval bool whether copy-on-write happens.
*/
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
{
let entry = self.page_table.get_entry(addr);
@ -111,26 +140,61 @@ struct FrameRcMap(Option<BTreeMap<Frame, (u16, u16)>>);
type Frame = usize;
impl FrameRcMap {
/// Returns the read-reference count of `frame` (0 if the frame is untracked).
fn read_count(&mut self, frame: &Frame) -> u16 {
    match self.map().get(frame) {
        Some(&(read, _write)) => read,
        None => 0,
    }
}
/// Returns the write-reference count of `frame` (0 if the frame is untracked).
fn write_count(&mut self, frame: &Frame) -> u16 {
    match self.map().get(frame) {
        Some(&(_read, write)) => write,
        None => 0,
    }
}
/// Increment the read-reference count of `frame`,
/// inserting a fresh (0, 0) counter pair if the frame is untracked.
fn read_increase(&mut self, frame: &Frame) {
    // Entry API: a single lookup instead of get + insert,
    // and no clone of the counter pair (`Frame` is `usize`, i.e. `Copy`).
    self.map().entry(*frame).or_insert((0, 0)).0 += 1;
}
/// Decrement the read-reference count of `frame`.
///
/// Panics if the frame is not tracked in the map
/// (that would indicate an unbalanced increase/decrease).
fn read_decrease(&mut self, frame: &Frame) {
    self.map()
        .get_mut(frame)
        .expect("read_decrease: frame has no reference count")
        .0 -= 1;
}
/// Increment the write-reference count of `frame`,
/// inserting a fresh (0, 0) counter pair if the frame is untracked.
fn write_increase(&mut self, frame: &Frame) {
    // Entry API: a single lookup instead of get + insert,
    // and no clone of the counter pair (`Frame` is `usize`, i.e. `Copy`).
    self.map().entry(*frame).or_insert((0, 0)).1 += 1;
}
/// Decrement the write-reference count of `frame`.
///
/// Panics if the frame is not tracked in the map
/// (that would indicate an unbalanced increase/decrease).
fn write_decrease(&mut self, frame: &Frame) {
    self.map()
        .get_mut(frame)
        .expect("write_decrease: frame has no reference count")
        .1 -= 1;
}
/*
** @brief get the internal btree map, lazily initialize the btree map if it is not present
** @retval &mut BTreeMap<Frame, (u16, u16)>
** the internal btree map
*/
fn map(&mut self) -> &mut BTreeMap<Frame, (u16, u16)> {
if self.0.is_none() {
self.0 = Some(BTreeMap::new());

@ -1,25 +1,75 @@
//! memory set, area
//! and the inactive page table
use alloc::vec::Vec;
use core::fmt::{Debug, Error, Formatter};
use super::*;
use paging::*;
/// An inactive page table.
///
/// Note: an `InactivePageTable` is not itself a `PageTable`,
/// but it can be activated and "become" one.
///
/// NOTE(review): should this trait live in paging/mod.rs instead of here?
pub trait InactivePageTable {
    /// The active version of this page table.
    type Active: PageTable;
    /// Create an inactive page table with kernel memory already mapped.
    fn new() -> Self;
    /// Create an inactive page table without kernel memory mapped.
    fn new_bare() -> Self;
    /// Temporarily activate this page table and edit it.
    ///
    /// `f` is the editing action; it receives the temporarily
    /// activated page table as its argument.
    fn edit(&mut self, f: impl FnOnce(&mut Self::Active));
    /// Activate this page table.
    ///
    /// Unsafe: switching the address space invalidates all raw pointers
    /// into the old mappings — implementations must state their exact contract.
    unsafe fn activate(&self);
    /// Execute `f` with this inactive page table.
    ///
    /// Unsafe for the same reason as [`InactivePageTable::activate`].
    unsafe fn with(&self, f: impl FnOnce());
    /// Returns an implementation-defined token identifying this page table.
    fn token(&self) -> usize;

    // NOTE(review): why are the frame/stack allocation methods below
    // part of this trait rather than a separate allocator interface?
    /// Allocate a frame; returns the physical address of its beginning,
    /// or `None` if allocation failed.
    fn alloc_frame() -> Option<PhysAddr>;
    /// Deallocate the frame beginning at physical address `target`.
    fn dealloc_frame(target: PhysAddr);
    /// Allocate a stack space.
    fn alloc_stack() -> Stack;
}
/// 一片连续内存空间,有相同的访问权限
/// 对应ucore中 `vma_struct`
/// a continuous memory space when the same attribute
/// like `vma_struct` in ucore
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
@ -30,14 +80,39 @@ pub struct MemoryArea {
}
impl MemoryArea {
/// Create a memory area from a virtual address range.
///
/// * `start_addr` — virtual address of the beginning of the area
/// * `end_addr`   — virtual address of the end of the area (exclusive)
/// * `flags`      — common memory attributes of the area
/// * `name`       — name of the area
///
/// Panics if `start_addr > end_addr`.
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
    assert!(start_addr <= end_addr, "invalid memory area");
    MemoryArea { start_addr, end_addr, phys_start_addr: None, flags, name }
}
/// Create an identity-mapped memory area (virtual address == physical address).
///
/// * `start_addr` — virtual (and physical) address of the beginning of the area
/// * `end_addr`   — virtual address of the end of the area (exclusive)
/// * `flags`      — common memory attributes of the area
/// * `name`       — name of the area
///
/// Panics if `start_addr > end_addr`.
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
    assert!(start_addr <= end_addr, "invalid memory area");
    MemoryArea { start_addr, end_addr, phys_start_addr: Some(start_addr), flags, name }
}
/*
** @brief create a memory area from physics address
** @param start_addr: PhysAddr the physics address of beginning of the area
** @param end_addr: PhysAddr the physics address of end of the area
** @param offset: usize the offset between the physical address and the virtual address
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new_physical(phys_start_addr: PhysAddr, phys_end_addr: PhysAddr, offset: usize, flags: MemoryAttr, name: &'static str) -> Self {
let start_addr = phys_start_addr + offset;
let end_addr = phys_end_addr + offset;
@ -45,17 +120,35 @@ impl MemoryArea {
let phys_start_addr = Some(phys_start_addr);
MemoryArea { start_addr, end_addr, phys_start_addr, flags, name }
}
/// View the content of the memory area as a byte slice.
///
/// Unsafe: the caller must ensure the whole range
/// `[start_addr, end_addr)` is mapped and readable in the
/// current address space for the lifetime of the slice.
pub unsafe fn as_slice(&self) -> &[u8] {
    use core::slice;
    slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/// View the content of the memory area as a mutable byte slice.
///
/// Unsafe: the caller must ensure the range is mapped and writable.
/// NOTE(review): this returns `&mut [u8]` from `&self`, so two calls can
/// create aliasing mutable slices — callers must guarantee exclusivity.
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
    use core::slice;
    slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/// Returns whether `addr` lies inside this area (`end_addr` is exclusive).
pub fn contains(&self, addr: VirtAddr) -> bool {
    !(addr < self.start_addr || addr >= self.end_addr)
}
/*
** @brief test whether the memory area is overlap with another memory area
** @param other: &MemoryArea another memory area to test
** @retval bool whether the memory area is overlap with another memory area
*/
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
let p0 = Page::of_addr(self.start_addr);
let p1 = Page::of_addr(self.end_addr - 1) + 1;
@ -63,6 +156,11 @@ impl MemoryArea {
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/*
** @brief map the memory area to the physical address in a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn map<T: InactivePageTable>(&self, pt: &mut T::Active) {
match self.phys_start_addr {
Some(phys_start) => {
@ -81,6 +179,11 @@ impl MemoryArea {
}
}
}
/*
** @brief unmap the memory area from a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn unmap<T: InactivePageTable>(&self, pt: &mut T::Active) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
@ -93,6 +196,7 @@ impl MemoryArea {
}
}
/// The attributes of the memory
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
user: bool,
@ -102,22 +206,44 @@ pub struct MemoryAttr {
}
impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self {
self.user = true;
self
}
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/*
** @brief set the memory attribute's hide bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn hide(mut self) -> Self {
self.hide = true;
self
}
/*
** @brief apply the memory attribute to a page table entry
** @param entry: &mut impl Entry
** the page table entry to apply the attribute
** @retval none
*/
fn apply(&self, entry: &mut impl Entry) {
if self.user { entry.set_user(true); }
if self.readonly { entry.set_writable(false); }
@ -127,8 +253,8 @@ impl MemoryAttr {
}
}
/// 内存空间集合,包含若干段连续空间
/// 对应ucore中 `mm_struct`
/// set of memory space with multiple memory area with associated page table and stack space
/// like `mm_struct` in ucore
pub struct MemorySet<T: InactivePageTable> {
areas: Vec<MemoryArea>,
page_table: T,
@ -136,6 +262,10 @@ pub struct MemorySet<T: InactivePageTable> {
}
impl<T: InactivePageTable> MemorySet<T> {
/*
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
pub fn new() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
@ -143,7 +273,13 @@ impl<T: InactivePageTable> MemorySet<T> {
kstack: T::alloc_stack(),
}
}
/// Used for remap_kernel() where heap alloc is unavailable
/*
** @brief create a memory set from raw space
** Used for remap_kernel() where heap alloc is unavailable
** @param slice: &mut [u8] the initial memory for the Vec in the struct
** @param kstack: Stack kernel stack space
** @retval MemorySet<T> the memory set created
*/
pub unsafe fn new_from_raw_space(slice: &mut [u8], kstack: Stack) -> Self {
use core::mem::size_of;
let cap = slice.len() / size_of::<MemoryArea>();
@ -153,9 +289,19 @@ impl<T: InactivePageTable> MemorySet<T> {
kstack,
}
}
/// Find the memory area containing virtual address `addr`, if any.
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
    for area in self.areas.iter() {
        if area.contains(addr) {
            return Some(area);
        }
    }
    None
}
/*
** @brief add the memory area to the memory set
** @param area: MemoryArea the memory area to add
** @retval none
*/
pub fn push(&mut self, area: MemoryArea) {
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
@ -163,21 +309,47 @@ impl<T: InactivePageTable> MemorySet<T> {
self.page_table.edit(|pt| area.map::<T>(pt));
self.areas.push(area);
}
/// Returns an iterator over the memory areas in this set.
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
    self.areas.iter()
}
/// Execute `f` with the associated page table.
///
/// Unsafe: see [`InactivePageTable::with`].
pub unsafe fn with(&self, f: impl FnOnce()) {
    self.page_table.with(f);
}
/// Activate the associated page table.
///
/// Unsafe: see [`InactivePageTable::activate`].
pub unsafe fn activate(&self) {
    self.page_table.activate();
}
/// Returns the token of the associated (inactive) page table.
pub fn token(&self) -> usize {
    self.page_table.token()
}
/// Returns the top address of the associated kernel stack.
pub fn kstack_top(&self) -> usize {
    self.kstack.top
}
/*
** @brief clear the memory set
** @retval none
*/
pub fn clear(&mut self) {
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
@ -219,6 +391,7 @@ impl<T: InactivePageTable> Debug for MemorySet<T> {
}
}
/// the stack structure
#[derive(Debug)]
pub struct Stack {
pub top: usize,

@ -1,15 +1,22 @@
//! Mock Page Table
//!
//! An mock implementation for the PageTable.
//! Used to test page table operation.
use alloc::boxed::Box;
use super::*;
const PAGE_COUNT: usize = 16;
const PAGE_SIZE: usize = 4096;
/// A mock page table for test purposes.
pub struct MockPageTable {
    /// One entry per virtual page (`PAGE_COUNT` pages in total).
    entries: [MockEntry; PAGE_COUNT],
    /// Backing "physical" memory for all pages.
    data: [u8; PAGE_SIZE * PAGE_COUNT],
    /// Handler invoked when a mock page fault is triggered, if installed.
    page_fault_handler: Option<PageFaultHandler>,
}
// the entry of the mock page table
#[derive(Default, Copy, Clone)]
pub struct MockEntry {
target: PhysAddr,
@ -90,6 +97,10 @@ impl PageTable for MockPageTable {
}
impl MockPageTable {
/*
** @brief create a new MockPageTable
** @retval MockPageTable the mock page table created
*/
pub fn new() -> Self {
use core::mem::uninitialized;
MockPageTable {
@ -98,9 +109,22 @@ impl MockPageTable {
page_fault_handler: None,
}
}
/// Install the page-fault handler, used to mock the page-fault feature.
/// Replaces any previously installed handler.
pub fn set_handler(&mut self, page_fault_handler: PageFaultHandler) {
    self.page_fault_handler = Some(page_fault_handler);
}
/*
** @brief trigger page fault
** used for mock the page fault feature
** @param addr: VirtAddr the virtual address used to trigger the page fault
** @retval none
*/
fn trigger_page_fault(&mut self, addr: VirtAddr) {
// In order to call the handler with &mut self as an argument
// We have to first take the handler out of self, finally put it back
@ -108,6 +132,12 @@ impl MockPageTable {
handler(self, addr);
self.page_fault_handler = Some(handler);
}
/*
** @brief translate virtual address to physics address
** used for mock address translation feature
** @param addr: VirtAddr the virtual address to translation
** @retval PhysAddr the translation result
*/
fn translate(&self, addr: VirtAddr) -> PhysAddr {
let entry = &self.entries[addr / PAGE_SIZE];
assert!(entry.present);
@ -115,12 +145,24 @@ impl MockPageTable {
assert!(pa < self.data.len(), "Physical memory access out of range");
pa
}
/// Simulate a read access at `addr`: keep triggering page faults
/// until the page becomes present, then set its accessed bit.
fn _read(&mut self, addr: VirtAddr) {
    let page = addr / PAGE_SIZE;
    loop {
        if self.entries[page].present {
            break;
        }
        self.trigger_page_fault(addr);
    }
    self.entries[page].accessed = true;
}
/*
** @brief attempt to write the virtual address
** trigger page fault when failed
** @param addr: VirtAddr the virual address of data to write
** @retval none
*/
fn _write(&mut self, addr: VirtAddr) {
while !(self.entries[addr / PAGE_SIZE].present && self.entries[addr / PAGE_SIZE].writable) {
self.trigger_page_fault(addr);

@ -9,52 +9,190 @@ pub use self::mock_page_table::MockPageTable;
#[cfg(test)]
mod mock_page_table;
/// Generic interface of a page table.
pub trait PageTable {
    type Entry: Entry;
    /// Map virtual address `addr` to physical address `target`;
    /// returns the page table entry of the new mapping.
    fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
    /// Unmap virtual address `addr`.
    fn unmap(&mut self, addr: VirtAddr);
    /// Get the page table entry of virtual address `addr`.
    fn get_entry(&mut self, addr: VirtAddr) -> &mut Self::Entry;

    // For testing with mock
    /// Get a mutable byte-slice view of the content of the page
    /// containing `addr` (used for testing with the mock page table).
    /// NOTE(review): the returned lifetime `'b` is unconstrained by
    /// `'a` — implementors must justify why this is sound.
    fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8];
    /// Read one byte at virtual address `addr`
    /// (used for testing with the mock page table).
    fn read(&mut self, addr: VirtAddr) -> u8;
    /// Write the byte `data` at virtual address `addr`
    /// (used for testing with the mock page table).
    fn write(&mut self, addr: VirtAddr, data: u8);
}
/// Interface of an entry in a `PageTable`.
pub trait Entry {
    /// Force-update this page table entry.
    ///
    /// IMPORTANT!
    /// This must be called after any change to ensure it becomes effective.
    /// Usually this flushes the TLB/MMU.
    fn update(&mut self);
    /// The accessed bit. Will be set when the page is accessed.
    fn accessed(&self) -> bool;
    /// The dirty bit. Will be set when the page is written.
    fn dirty(&self) -> bool;
    /// The writable bit. Writing a page with writable=0 causes a PageFault.
    fn writable(&self) -> bool;
    /// The present bit. Accessing a page with present=0 causes a PageFault.
    fn present(&self) -> bool;
    /// Clear the accessed bit.
    fn clear_accessed(&mut self);
    /// Clear the dirty bit.
    fn clear_dirty(&mut self);
    /// Set the writable bit to `value`.
    fn set_writable(&mut self, value: bool);
    /// Set the present bit to `value`.
    fn set_present(&mut self, value: bool);
    /// The target physical address stored in this entry.
    /// May be reused for other purposes when present=0.
    fn target(&self) -> PhysAddr;
    /// Set the target physical address of this entry.
    fn set_target(&mut self, target: PhysAddr);

    // For Copy-on-write extension
    /// The "writable and shared" bit (copy-on-write extension).
    fn writable_shared(&self) -> bool;
    /// The "readonly and shared" bit (copy-on-write extension).
    fn readonly_shared(&self) -> bool;
    /// Mark the page as shared: writable-shared if `writable` is true,
    /// readonly-shared otherwise (copy-on-write extension).
    fn set_shared(&mut self, writable: bool);
    /// Mark the page as not shared (copy-on-write extension).
    fn clear_shared(&mut self);

    // For Swap extension
    /// The swapped bit (swap extension).
    fn swapped(&self) -> bool;
    /// Set the swapped bit to `value` (swap extension).
    fn set_swapped(&mut self, value: bool);
    /// The user bit of the entry.
    fn user(&self) -> bool;
    /// Set the user bit to `value`.
    fn set_user(&mut self, value: bool);
    /// The execute bit of the entry.
    fn execute(&self) -> bool;
    /// Set the execute bit to `value`.
    fn set_execute(&mut self, value: bool);
}

@ -1,3 +1,5 @@
//! Implement the swap manager with the enhanced clock page replacement algorithm
use alloc::collections::VecDeque;
use super::*;
use paging::Entry;

@ -1,3 +1,5 @@
//! Implement the swap manager with the FIFO page replacement algorithm
use alloc::collections::VecDeque;
use super::*;

@ -1,3 +1,8 @@
//! Mock Swapper
//!
//! An mock implement of the swapper
//! Used to test page table operation
use super::Swapper;
use alloc::btree_map::BTreeMap;
use core::mem::uninitialized;
@ -37,6 +42,10 @@ impl Swapper for MockSwapper {
}
impl MockSwapper {
/// Allocate an unused id for a location on the mock device.
///
/// Scans upward from 0 and returns the first id not present in the map.
/// (The previous implementation stopped at an arbitrary limit of 100 ids
/// and panicked once they were all in use.)
fn alloc_id(&self) -> usize {
    // The map is finite, so a free id always exists and the scan terminates.
    (0..).find(|i| !self.map.contains_key(i)).unwrap()
}

@ -1,3 +1,11 @@
//! Swap extension for page table
//! and generic interface for swap manager and swapper
//!
//! To use the SwapExt, make a wrapper over the original page table using a swap manager and a swapper
//! Like: SwapExt::new(origin_page_table,swap_manager,swapper)
//! Invoke page_fault_handler() on the SwapExt to run the swap process
//! If the method above returns true, a page is swapped in, else do your own things.
use super::*;
use super::paging::*;
use core::ops::{Deref, DerefMut};
@ -12,26 +20,58 @@ mod mock_swapper;
/// Manages all swappable pages and decides which one to swap out.
pub trait SwapManager {
    /// Update internal state on each tick.
    /// Called when a tick interrupt occurs.
    fn tick(&mut self);
    /// Update internal state when a swappable page at virtual address
    /// `addr` is mapped into memory.
    fn push(&mut self, addr: VirtAddr);
    /// Update internal state when the page at virtual address `addr`
    /// is removed from memory; deletes the `addr` entry from the manager.
    fn remove(&mut self, addr: VirtAddr);
    /// Choose a victim page to swap out; returns its virtual address,
    /// or `None` if there is nothing to evict.
    /// (The parameters are currently only used by `EnhancedClockSwapManager`.)
    fn pop<T, S>(&mut self, page_table: &mut T, swapper: &mut S) -> Option<VirtAddr>
        where T: PageTable, S: Swapper;
}
/// Executes swap in & out against a backing device.
pub trait Swapper {
    /// Allocate space on the device and write `data` to it.
    /// On success, returns a token indicating the location on the device.
    fn swap_out(&mut self, data: &[u8]) -> Result<usize, ()>;
    /// Overwrite the data stored at `token` on the device with `data`.
    fn swap_update(&mut self, token: usize, data: &[u8]) -> Result<(), ()>;
    /// Read the data stored at `token` back into `data`
    /// and deallocate the space on the device.
    fn swap_in(&mut self, token: usize, data: &mut [u8]) -> Result<(), ()>;
}
@ -43,6 +83,13 @@ struct SwapExt<T: PageTable, M: SwapManager, S: Swapper> {
}
impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
/*
** @brief create a swap extension
** @param page_table: T the inner page table
** @param swap_manager: M the SwapManager used
** @param swapper: S the Swapper used
** @retval SwapExt the swap extension created
*/
pub fn new(page_table: T, swap_manager: M, swapper: S) -> Self {
SwapExt {
page_table,
@ -50,11 +97,22 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
swapper,
}
}
/// Map virtual address `addr` to physical address `target` and
/// register the page with the swap manager so it becomes swappable.
/// Returns the page table entry of the new mapping.
/// (`self.map` presumably resolves to the inner page table via `Deref` —
/// confirm against the `Deref` impl.)
pub fn map_to_swappable(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut T::Entry {
    self.swap_manager.push(addr);
    self.map(addr, target)
}
/// Swap out any one of the swapped pages, return the released PhysAddr.
/*
** @brief Swap out any one of the swapped pages
** @retval Result<PhysAddr, SwapError>
** the physics address of released frame if success,
** the error if failed
*/
pub fn swap_out_any(&mut self) -> Result<PhysAddr, SwapError> {
let victim = {
let Self {ref mut page_table, ref mut swap_manager, ref mut swapper} = self;
@ -65,7 +123,13 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
Some(addr) => self.swap_out(addr),
}
}
/// Swap out page of `addr`, return the origin map target.
/*
** @brief Swap out page
** @param addr: VirtAddr the virual address of beginning of page
** @retval Result<PhysAddr, SwapError>
** the physics address of the original map target frame if success,
** the error if failed
*/
fn swap_out(&mut self, addr: VirtAddr) -> Result<PhysAddr, SwapError> {
let data = self.page_table.get_page_slice_mut(addr);
let entry = self.page_table.get_entry(addr);
@ -80,7 +144,13 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
entry.update();
Ok(target)
}
/// Map page of `addr` to `target`, then swap in the data.
/*
** @brief map the virtual address to a target physics address and then swap in page data
** @param addr: VirtAddr the virual address of beginning of page
** @param addr: PhysAddr the target physics address
** @retval Result<()), SwapError>
** the execute result, and the error if failed
*/
fn swap_in(&mut self, addr: VirtAddr, target: PhysAddr) -> Result<(), SwapError> {
let token = {
let entry = self.page_table.get_entry(addr);
@ -99,6 +169,16 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
self.swap_manager.push(addr);
Ok(())
}
/*
** @brief execute the swap process for page fault
** This function must be called whenever PageFault happens.
** @param addr: VirtAddr the virual address of the page fault
** @param alloc_frame: impl FnOnce() -> PhysAddr
** the page allocation function
** that allocate a page and returns physics address
** of beginning of the page
** @retval bool whether swap in happens.
*/
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> Option<PhysAddr>) -> bool {
if !self.page_table.get_entry(addr).swapped() {
return false;
@ -111,9 +191,13 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
}
pub enum SwapError {
    /// Attempted to swap out a page that is already swapped out.
    AlreadySwapped,
    /// Attempted to swap in a page that is already in memory.
    NotSwapped,
    /// There is no page that can be swapped out.
    NoSwapped,
    /// Swap failed due to an I/O error while interacting with the device.
    IOError,
}

Loading…
Cancel
Save