parent
d2d9affddf
commit
aa109498f1
@ -1,89 +0,0 @@
|
||||
use alloc::vec::Vec;
|
||||
|
||||
/// Address type used throughout this module (a raw byte address).
type Addr = usize;
|
||||
|
||||
/// A contiguous region of memory sharing the same access permissions.
/// Corresponds to `vma_struct` in ucore.
#[derive(Debug, Eq, PartialEq)]
pub struct MemoryArea {
    // Inclusive start of the region.
    pub start_addr: Addr,
    // Exclusive end of the region (`contains` checks `addr < end_addr`).
    pub end_addr: Addr,
    // Access-permission flags; bit meanings are defined by the caller.
    pub flags: u32,
    // Human-readable label, e.g. "code" or "data".
    pub name: &'static str,
}
|
||||
|
||||
impl MemoryArea {
|
||||
pub fn contains(&self, addr: Addr) -> bool {
|
||||
addr >= self.start_addr && addr < self.end_addr
|
||||
}
|
||||
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
|
||||
!(self.end_addr <= other.start_addr || self.start_addr >= other.end_addr)
|
||||
}
|
||||
}
|
||||
|
||||
/// A collection of memory areas, each a contiguous address range.
/// Corresponds to `mm_struct` in ucore.
#[derive(Debug)]
pub struct MemorySet {
    // Kept pairwise non-overlapping; the invariant is enforced by `push`.
    areas: Vec<MemoryArea>,
}
|
||||
|
||||
impl MemorySet {
|
||||
pub fn new() -> Self {
|
||||
MemorySet { areas: Vec::<MemoryArea>::new() }
|
||||
}
|
||||
pub fn find_area(&self, addr: Addr) -> Option<&MemoryArea> {
|
||||
self.areas.iter().find(|area| area.contains(addr))
|
||||
}
|
||||
pub fn push(&mut self, area: MemoryArea) {
|
||||
assert!(area.start_addr <= area.end_addr, "invalid memory area");
|
||||
if self.areas.iter()
|
||||
.find(|other| area.is_overlap_with(other))
|
||||
.is_some() {
|
||||
panic!("memory area overlap");
|
||||
}
|
||||
self.areas.push(area);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;

    // Two adjacent (touching but non-overlapping) areas can both be pushed,
    // and lookups resolve to the correct area or to None.
    #[test]
    fn push_and_find() {
        let mut ms = MemorySet::new();
        ms.push(MemoryArea {
            start_addr: 0x0,
            end_addr: 0x8,
            flags: 0x0,
            name: "code",
        });
        ms.push(MemoryArea {
            start_addr: 0x8,
            end_addr: 0x10,
            flags: 0x1,
            name: "data",
        });
        // 0x6 lies inside [0x0, 0x8); 0x11 lies beyond every area.
        assert_eq!(ms.find_area(0x6).unwrap().name, "code");
        assert_eq!(ms.find_area(0x11), None);
    }

    // Pushing an area that overlaps an existing one must panic.
    #[test]
    #[should_panic]
    fn push_overlap() {
        let mut ms = MemorySet::new();
        ms.push(MemoryArea {
            start_addr: 0x0,
            end_addr: 0x8,
            flags: 0x0,
            name: "code",
        });
        ms.push(MemoryArea {
            start_addr: 0x4,
            end_addr: 0x10,
            flags: 0x1,
            name: "data",
        });
    }
}
|
@ -0,0 +1,146 @@
|
||||
use alloc::boxed::Box;
|
||||
use super::*;
|
||||
|
||||
/// Number of virtual pages the mock table covers.
const PAGE_COUNT: usize = 16;
/// Page size in bytes.
const PAGE_SIZE: usize = 4096;

/// An in-memory, software-only page table used for testing page-fault logic.
pub struct MockPageTable {
    // One entry per virtual page.
    entries: [MockEntry; PAGE_COUNT],
    // Backing storage indexed by the translated physical address.
    data: [u8; PAGE_SIZE * PAGE_COUNT],
    // Invoked by read()/write() when the target page is not present.
    page_fault_handler: Option<PageFaultHandler>,
}
|
||||
|
||||
/// One slot of the mock page table: target page plus status/permission flags.
#[derive(Default, Copy, Clone)]
pub struct MockEntry {
    // Physical address of the mapped page (page-aligned by `map`).
    target: PhysAddr,
    // Page is mapped; reads/writes while false trigger a page fault.
    present: bool,
    // Writes allowed; writing while false triggers a page fault.
    writable: bool,
    // Set by read() and write().
    accessed: bool,
    // Set by write() only.
    dirty: bool,
}
|
||||
|
||||
impl Entry for MockEntry {
    // --- flag getters ---
    fn accessed(&self) -> bool {
        self.accessed
    }
    fn dirty(&self) -> bool {
        self.dirty
    }
    fn writable(&self) -> bool {
        self.writable
    }
    fn present(&self) -> bool {
        self.present
    }

    // --- flag setters ---
    fn clear_accessed(&mut self) {
        self.accessed = false;
    }
    fn clear_dirty(&mut self) {
        self.dirty = false;
    }
    fn set_writable(&mut self, value: bool) {
        self.writable = value;
    }
    fn set_present(&mut self, value: bool) {
        self.present = value;
    }

    /// Physical address this entry points at.
    fn target(&self) -> usize {
        self.target
    }
}
|
||||
|
||||
/// Callback invoked on a page fault; receives the faulting table and address.
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
|
||||
|
||||
impl PageTable for MockPageTable {
    type Entry = MockEntry;

    /// Maps the page containing `addr` to the page containing `target`
    /// and returns the new entry for further configuration.
    ///
    /// # Panics
    /// Panics if the page is already mapped.
    fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
        let entry = &mut self.entries[addr / PAGE_SIZE];
        assert!(!entry.present);
        entry.present = true;
        // Store only the page-aligned part of the target address.
        entry.target = target & !(PAGE_SIZE - 1);
        entry
    }
    /// Removes the mapping for the page containing `addr`.
    ///
    /// # Panics
    /// Panics if the page is not currently mapped.
    fn unmap(&mut self, addr: VirtAddr) {
        let entry = &mut self.entries[addr / PAGE_SIZE];
        assert!(entry.present);
        entry.present = false;
    }

    /// Mutable access to the entry for the page containing `addr`.
    fn get_entry(&mut self, addr: VirtAddr) -> &mut <Self as PageTable>::Entry {
        &mut self.entries[addr / PAGE_SIZE]
    }
}
|
||||
|
||||
impl MockPageTable {
|
||||
pub fn new() -> Self {
|
||||
use core::mem::uninitialized;
|
||||
MockPageTable {
|
||||
entries: [MockEntry::default(); PAGE_COUNT],
|
||||
data: unsafe { uninitialized() },
|
||||
page_fault_handler: None,
|
||||
}
|
||||
}
|
||||
pub fn set_handler(&mut self, page_fault_handler: PageFaultHandler) {
|
||||
self.page_fault_handler = Some(page_fault_handler);
|
||||
}
|
||||
fn trigger_page_fault(&mut self, addr: VirtAddr) {
|
||||
// In order to call the handler with &mut self as an argument
|
||||
// We have to first take the handler out of self, finally put it back
|
||||
let mut handler = self.page_fault_handler.take().unwrap();
|
||||
handler(self, addr);
|
||||
self.page_fault_handler = Some(handler);
|
||||
}
|
||||
fn translate(&self, addr: VirtAddr) -> PhysAddr {
|
||||
let entry = &self.entries[addr / PAGE_SIZE];
|
||||
assert!(entry.present);
|
||||
(entry.target & !(PAGE_SIZE - 1)) | (addr & (PAGE_SIZE - 1))
|
||||
}
|
||||
/// Read memory, mark accessed, trigger page fault if not present
|
||||
pub fn read(&mut self, addr: VirtAddr) -> u8 {
|
||||
while !self.entries[addr / PAGE_SIZE].present {
|
||||
self.trigger_page_fault(addr);
|
||||
}
|
||||
self.entries[addr / PAGE_SIZE].accessed = true;
|
||||
self.data[self.translate(addr)]
|
||||
}
|
||||
/// Write memory, mark accessed and dirty, trigger page fault if not present
|
||||
pub fn write(&mut self, addr: VirtAddr, data: u8) {
|
||||
while !(self.entries[addr / PAGE_SIZE].present && self.entries[addr / PAGE_SIZE].writable) {
|
||||
self.trigger_page_fault(addr);
|
||||
}
|
||||
self.entries[addr / PAGE_SIZE].accessed = true;
|
||||
self.entries[addr / PAGE_SIZE].dirty = true;
|
||||
self.data[self.translate(addr)] = data;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use alloc::arc::Arc;
    use core::cell::RefCell;

    #[test]
    fn test() {
        // Shared counter so both the handler closure and the test body can
        // observe how many page faults occurred.
        let page_fault_count = Arc::new(RefCell::new(0usize));

        let mut pt = MockPageTable::new();
        pt.set_handler(Box::new({
            let page_fault_count1 = page_fault_count.clone();
            move |pt: &mut MockPageTable, addr: VirtAddr| {
                *page_fault_count1.borrow_mut() += 1;
                // Identity-map the faulting page and allow writes.
                pt.map(addr, addr).set_writable(true);
            }
        }));

        // Pre-mapped page: reading must not fault, but must set "accessed".
        pt.map(0, 0);
        pt.read(0);
        assert_eq!(*page_fault_count.borrow(), 0);
        assert!(pt.get_entry(0).accessed());
        assert!(!pt.get_entry(0).dirty());

        pt.get_entry(0).clear_accessed();
        assert!(!pt.get_entry(0).accessed());

        // Address 1 is in the same page: still no fault, flag set again.
        pt.read(1);
        assert_eq!(*page_fault_count.borrow(), 0);
        assert!(pt.get_entry(0).accessed());

        // Non-present page: one fault, then accessed+dirty after the write.
        pt.write(0x1000, 0xff);
        assert_eq!(*page_fault_count.borrow(), 1);
        assert!(pt.get_entry(0x1000).accessed());
        assert!(pt.get_entry(0x1000).dirty());
        assert_eq!(pt.read(0x1000), 0xff);

        pt.get_entry(0x1000).clear_dirty();
        assert!(!pt.get_entry(0x1000).dirty());

        // Unmapping and reading again must fault once more.
        pt.unmap(0);
        pt.read(0);
        assert_eq!(*page_fault_count.borrow(), 2);
    }
}
|
@ -1,4 +1,28 @@
|
||||
pub use self::page_table::*;
|
||||
use super::*;
|
||||
pub use self::mock_page_table::MockPageTable;
|
||||
|
||||
mod page_table;
|
||||
mod mock_page_table;
|
||||
|
||||
/// Minimal page-table interface used by the memory subsystem.
pub trait PageTable {
    /// Entry type exposed by this table.
    type Entry: Entry;
    /// Maps the page containing `addr` to `target`; returns the new entry.
    fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
    /// Removes the mapping for the page containing `addr`.
    fn unmap(&mut self, addr: VirtAddr);
    /// Mutable access to the entry for the page containing `addr`.
    fn get_entry(&mut self, addr: VirtAddr) -> &mut Self::Entry;
}
|
||||
|
||||
/// One page-table entry: status flags, write permission, and target address.
pub trait Entry {
    /// Will be set when accessed.
    fn accessed(&self) -> bool;
    /// Will be set when written.
    fn dirty(&self) -> bool;
    /// Will PageFault when trying to write a page where writable=0.
    fn writable(&self) -> bool;
    /// Will PageFault when trying to access a page where present=0.
    fn present(&self) -> bool;

    fn clear_accessed(&mut self);
    fn clear_dirty(&mut self);
    fn set_writable(&mut self, value: bool);
    fn set_present(&mut self, value: bool);

    /// Physical address this entry maps to.
    fn target(&self) -> PhysAddr;
}
|
||||
|
@ -1,130 +0,0 @@
|
||||
use alloc::boxed::Box;
|
||||
use super::*;
|
||||
|
||||
/// Number of virtual pages the mock table covers.
const PAGE_COUNT: usize = 16;
/// Page size in bytes.
const PAGE_SIZE: usize = 4096;

/// A software-only page table used for testing paging logic.
pub struct MockPageTable {
    // Whether each page is currently mapped.
    mapped: [bool; PAGE_COUNT],
    // Per-page "accessed" flag, set on read or write.
    accessed: [bool; PAGE_COUNT],
    // Per-page "dirty" flag, set on write.
    dirty: [bool; PAGE_COUNT],
    // Backing storage indexed directly by virtual address (no translation).
    data: [u8; PAGE_SIZE * PAGE_COUNT],
    // Invoked by read()/write() when the target page is not mapped.
    page_fault_handler: Option<PageFaultHandler>,
    // Maximum number of pages that may be mapped at once.
    capacity: usize,
}

/// Callback invoked on a page fault; receives the faulting table and address.
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
|
||||
|
||||
impl PageTable for MockPageTable {
    /// Whether the page containing `addr` has been accessed.
    fn accessed(&self, addr: VirtAddr) -> bool {
        self.accessed[addr / PAGE_SIZE]
    }
    /// Whether the page containing `addr` has been written.
    fn dirty(&self, addr: VirtAddr) -> bool {
        self.dirty[addr / PAGE_SIZE]
    }
    fn clear_accessed(&mut self, addr: usize) {
        self.accessed[addr / PAGE_SIZE] = false;
    }
    fn clear_dirty(&mut self, addr: usize) {
        self.dirty[addr / PAGE_SIZE] = false;
    }
    /// Map a page, return false if no more space
    fn map(&mut self, addr: VirtAddr) -> bool {
        // Refuse when `capacity` pages are already mapped.
        if self.mapped.iter().filter(|&&b| b).count() == self.capacity {
            return false;
        }
        self.mapped[addr / PAGE_SIZE] = true;
        true
    }
    fn unmap(&mut self, addr: VirtAddr) {
        self.mapped[addr / PAGE_SIZE] = false;
    }
}
|
||||
|
||||
impl MockPageTable {
|
||||
pub fn new(capacity: usize) -> Self {
|
||||
use core::mem::uninitialized;
|
||||
MockPageTable {
|
||||
mapped: [false; PAGE_COUNT],
|
||||
accessed: [false; PAGE_COUNT],
|
||||
dirty: [false; PAGE_COUNT],
|
||||
data: unsafe{ uninitialized() },
|
||||
page_fault_handler: None,
|
||||
capacity,
|
||||
}
|
||||
}
|
||||
pub fn set_handler(&mut self, page_fault_handler: PageFaultHandler) {
|
||||
self.page_fault_handler = Some(page_fault_handler);
|
||||
}
|
||||
fn trigger_page_fault_if_not_present(&mut self, addr: VirtAddr) {
|
||||
let page_id = addr / PAGE_SIZE;
|
||||
while !self.mapped[page_id] {
|
||||
let self_mut = unsafe{ &mut *(self as *mut Self) };
|
||||
(self.page_fault_handler.as_mut().unwrap())(self_mut, addr);
|
||||
}
|
||||
}
|
||||
/// Read memory, mark accessed, trigger page fault if not present
|
||||
pub fn read(&mut self, addr: VirtAddr) -> u8 {
|
||||
let page_id = addr / PAGE_SIZE;
|
||||
self.trigger_page_fault_if_not_present(addr);
|
||||
self.accessed[page_id] = true;
|
||||
self.data[addr]
|
||||
}
|
||||
/// Write memory, mark accessed and dirty, trigger page fault if not present
|
||||
pub fn write(&mut self, addr: VirtAddr, data: u8) {
|
||||
let page_id = addr / PAGE_SIZE;
|
||||
self.trigger_page_fault_if_not_present(addr);
|
||||
self.accessed[page_id] = true;
|
||||
self.dirty[page_id] = true;
|
||||
self.data[addr] = data;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use alloc::arc::Arc;
    use core::cell::RefCell;

    #[test]
    fn test() {
        // Shared counter so both the handler closure and the test body can
        // observe how many page faults occurred.
        let page_fault_count = Arc::new(RefCell::new(0usize));

        // Capacity of 2 pages: the third map attempt below must fail.
        let mut pt = MockPageTable::new(2);
        pt.set_handler(Box::new({
            let page_fault_count1 = page_fault_count.clone();
            move |pt: &mut MockPageTable, addr: VirtAddr| {
                *page_fault_count1.borrow_mut() += 1;
                pt.map(addr);
            }
        }));

        // Pre-mapped page: reading must not fault, but must set "accessed".
        pt.map(0);
        pt.read(0);
        assert_eq!(*page_fault_count.borrow(), 0);
        assert!(pt.accessed(0));
        assert!(!pt.dirty(0));

        pt.clear_accessed(0);
        assert!(!pt.accessed(0));

        // Address 1 is in the same page: still no fault, flag set again.
        pt.read(1);
        assert_eq!(*page_fault_count.borrow(), 0);
        assert!(pt.accessed(0));

        // Unmapped page: one fault, then accessed+dirty after the write.
        pt.write(0x1000, 0xff);
        assert_eq!(*page_fault_count.borrow(), 1);
        assert!(pt.accessed(0x1000));
        assert!(pt.dirty(0x1000));
        assert_eq!(pt.read(0x1000), 0xff);

        pt.clear_dirty(0x1000);
        assert!(!pt.dirty(0x1000));

        // Two pages are mapped and capacity is 2, so mapping a third fails.
        assert_eq!(pt.map(0x2000), false);

        // Unmapping and reading again must fault once more.
        pt.unmap(0);
        pt.read(0);
        assert_eq!(*page_fault_count.borrow(), 2);
    }
}
|
@ -1,13 +0,0 @@
|
||||
use super::*;
|
||||
pub use self::mock_page_table::MockPageTable;
|
||||
|
||||
mod mock_page_table;
|
||||
|
||||
/// Minimal page-table interface: per-page status flags plus map/unmap.
pub trait PageTable {
    /// Whether the page containing `addr` was accessed since last cleared.
    fn accessed(&self, addr: VirtAddr) -> bool;
    /// Whether the page containing `addr` was written since last cleared.
    fn dirty(&self, addr: VirtAddr) -> bool;
    fn clear_accessed(&mut self, addr: VirtAddr);
    fn clear_dirty(&mut self, addr: VirtAddr);
    /// Maps the page containing `addr`; returns false if no space is left.
    fn map(&mut self, addr: VirtAddr) -> bool;
    /// Removes the mapping for the page containing `addr`.
    fn unmap(&mut self, addr: VirtAddr);
}
|
@ -1,33 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// A physical frame, identified by its index (`number = addr / PAGE_SIZE`).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
    // Frame index, not a byte address.
    number: usize,
}
|
||||
|
||||
impl Frame {
|
||||
pub fn containing_address(address: PhysAddr) -> Frame {
|
||||
Frame{ number: address.get() as usize / PAGE_SIZE }
|
||||
}
|
||||
//TODO: Set private
|
||||
pub fn start_address(&self) -> PhysAddr {
|
||||
PhysAddr::new((self.number * PAGE_SIZE) as u64)
|
||||
}
|
||||
|
||||
pub fn clone(&self) -> Frame {
|
||||
Frame { number: self.number }
|
||||
}
|
||||
//TODO: Set private
|
||||
// pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
|
||||
// FrameIter {
|
||||
// start: start,
|
||||
// end: end,
|
||||
// }
|
||||
// }
|
||||
}
|
||||
|
||||
impl Drop for Frame {
|
||||
fn drop(&mut self) {
|
||||
panic!("frame must be deallocate");
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
/// Allocator handing out and reclaiming physical frames.
pub trait FrameAllocator {
    /// Hands out an unused frame, or `None` when no frame is available.
    fn allocate_frame(&mut self) -> Option<Frame>;
    /// Returns `frame` to the allocator for reuse.
    fn deallocate_frame(&mut self, frame: Frame);
}
|
||||
|
||||
/// A physical memory region described by its bounds.
pub trait MemoryArea {
    /// First physical address of the area.
    fn begin(&self) -> PhysAddr;
    /// End physical address of the area.
    // NOTE(review): presumably exclusive, matching the half-open convention
    // elsewhere in this crate — confirm with implementors.
    fn end(&self) -> PhysAddr;
}
|
@ -1,9 +0,0 @@
|
||||
pub use self::physaddr::PhysAddr;
|
||||
pub use self::frame::Frame;
|
||||
pub use self::frame_allocator::FrameAllocator;
|
||||
|
||||
use super::*;
|
||||
|
||||
mod frame;
|
||||
mod physaddr;
|
||||
mod frame_allocator;
|
@ -1,50 +0,0 @@
|
||||
use core::fmt;
|
||||
|
||||
/// Represents a physical memory address.
///
/// Newtype over `u64` so physical addresses cannot be silently mixed with
/// other integers; construct with `PhysAddr::new`, read back with `get`.
#[derive(Copy, Clone, Eq, Ord, PartialEq, PartialOrd)]
pub struct PhysAddr(u64);
|
||||
|
||||
impl PhysAddr {
|
||||
pub fn new(addr: u64) -> PhysAddr {
|
||||
PhysAddr(addr)
|
||||
}
|
||||
pub fn get(&self) -> u64 {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "{:#x}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Binary for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::LowerHex for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Octal for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::UpperHex for PhysAddr {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
Loading…
Reference in new issue