Use CowExt for RV32

master
WangRunji 6 years ago
parent 7d856fe009
commit 96d8af8034

@@ -103,31 +103,39 @@ impl<T: PageTable> DerefMut for CowExt<T> {
 }
 
 /// A map contains reference count for shared frame
+///
+/// It will lazily construct the `BTreeMap`, to avoid heap alloc when heap is unavailable.
 #[derive(Default)]
-struct FrameRcMap(BTreeMap<Frame, (u16, u16)>);
+struct FrameRcMap(Option<BTreeMap<Frame, (u16, u16)>>);
 
 type Frame = usize;
 
 impl FrameRcMap {
     fn read_count(&mut self, frame: &Frame) -> u16 {
-        self.0.get(frame).unwrap_or(&(0, 0)).0
+        self.map().get(frame).unwrap_or(&(0, 0)).0
     }
     fn write_count(&mut self, frame: &Frame) -> u16 {
-        self.0.get(frame).unwrap_or(&(0, 0)).1
+        self.map().get(frame).unwrap_or(&(0, 0)).1
     }
     fn read_increase(&mut self, frame: &Frame) {
-        let (r, w) = self.0.get(&frame).unwrap_or(&(0, 0)).clone();
-        self.0.insert(frame.clone(), (r + 1, w));
+        let (r, w) = self.map().get(&frame).unwrap_or(&(0, 0)).clone();
+        self.map().insert(frame.clone(), (r + 1, w));
     }
     fn read_decrease(&mut self, frame: &Frame) {
-        self.0.get_mut(frame).unwrap().0 -= 1;
+        self.map().get_mut(frame).unwrap().0 -= 1;
     }
     fn write_increase(&mut self, frame: &Frame) {
-        let (r, w) = self.0.get(&frame).unwrap_or(&(0, 0)).clone();
-        self.0.insert(frame.clone(), (r, w + 1));
+        let (r, w) = self.map().get(&frame).unwrap_or(&(0, 0)).clone();
+        self.map().insert(frame.clone(), (r, w + 1));
     }
     fn write_decrease(&mut self, frame: &Frame) {
-        self.0.get_mut(frame).unwrap().1 -= 1;
+        self.map().get_mut(frame).unwrap().1 -= 1;
     }
+    fn map(&mut self) -> &mut BTreeMap<Frame, (u16, u16)> {
+        if self.0.is_none() {
+            self.0 = Some(BTreeMap::new());
+        }
+        self.0.as_mut().unwrap()
+    }
 }
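The hunk above wraps the inner BTreeMap in an Option so that a `Default` FrameRcMap can exist before the kernel heap is ready, as the new doc comment says. Below is a minimal standalone sketch of that lazy-init pattern, using std's BTreeMap so it runs outside the kernel (the kernel pulls the same type from the alloc crate). The main function and the use of Option::get_or_insert_with are mine, shown as an idiomatic alternative to the is_none()/insert/unwrap sequence in the diff; this is an illustration, not the crate's code.

use std::collections::BTreeMap;

type Frame = usize;

/// Same lazy-init pattern as FrameRcMap above: the inner map lives in an
/// Option and is only materialized on first access through map().
#[derive(Default)]
struct FrameRcMap(Option<BTreeMap<Frame, (u16, u16)>>);

impl FrameRcMap {
    fn map(&mut self) -> &mut BTreeMap<Frame, (u16, u16)> {
        // Equivalent to the is_none() / insert / unwrap sequence in the diff.
        self.0.get_or_insert_with(BTreeMap::new)
    }
    fn read_increase(&mut self, frame: &Frame) {
        self.map().entry(*frame).or_insert((0, 0)).0 += 1;
    }
    fn read_count(&mut self, frame: &Frame) -> u16 {
        self.map().get(frame).unwrap_or(&(0, 0)).0
    }
}

fn main() {
    let mut rc = FrameRcMap::default(); // no inner map constructed yet
    rc.read_increase(&0x8000_0000);     // map() builds the BTreeMap here
    assert_eq!(rc.read_count(&0x8000_0000), 1);
}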

@@ -4,7 +4,6 @@ use consts::MEMORY_OFFSET;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 use ucore_memory::{*, paging::PageTable};
-#[cfg(target_arch = "x86_64")]
 use ucore_memory::cow::CowExt;
 pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
@@ -34,44 +33,24 @@ pub fn alloc_stack() -> Stack {
     Stack { top, bottom }
 }
 
-#[cfg(target_arch = "x86_64")]
 lazy_static! {
     static ref ACTIVE_TABLE: Mutex<CowExt<ActivePageTable>> = Mutex::new(unsafe {
         CowExt::new(ActivePageTable::new())
     });
 }
 
-#[cfg(target_arch = "riscv")]
-lazy_static! {
-    static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
-        ActivePageTable::new()
-    });
-}
-
 /// The only way to get active page table
-#[cfg(target_arch = "x86_64")]
 pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
     ACTIVE_TABLE.lock()
 }
-#[cfg(target_arch = "riscv")]
-pub fn active_table() -> MutexGuard<'static, ActivePageTable> {
-    ACTIVE_TABLE.lock()
-}
 
 // Return true to continue, false to halt
-#[cfg(target_arch = "x86_64")]
 pub fn page_fault_handler(addr: usize) -> bool {
     // Handle copy on write
     unsafe { ACTIVE_TABLE.force_unlock(); }
     active_table().page_fault_handler(addr, || alloc_frame().unwrap())
 }
-#[cfg(target_arch = "riscv")]
-pub fn page_fault_handler(addr: usize) -> bool {
-    false
-}
 
 pub fn init_heap() {
     use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
     unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
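With the riscv stub that always returned false removed, both architectures now resolve write faults through active_table().page_fault_handler(addr, || alloc_frame().unwrap()). As context for what that call is asked to do, here is a toy, self-contained model of a copy-on-write write fault: every type and name in it (CowDemo, entries, rc, next_free) is invented for illustration and is not the ucore_memory API, and a real handler would also copy the page contents, which the sketch only notes in a comment.

use std::collections::BTreeMap;

type Frame = usize;

/// Toy CoW state: virtual page -> (frame, writable), plus a per-frame
/// share count playing the role of FrameRcMap's counters.
struct CowDemo {
    entries: BTreeMap<usize, (Frame, bool)>,
    rc: BTreeMap<Frame, u16>,
    next_free: Frame,
}

impl CowDemo {
    /// Conceptual write-fault handler. Returns true if the fault was a
    /// CoW fault and was resolved (continue), false otherwise (halt),
    /// matching the bool contract of page_fault_handler above.
    fn page_fault_handler(&mut self, page: usize) -> bool {
        let (frame, writable) = match self.entries.get(&page) {
            Some(&e) => e,
            None => return false, // unmapped: a genuine fault
        };
        if writable {
            return false; // already writable: not a CoW page
        }
        // Break the sharing: allocate a private frame (a real kernel
        // would copy the old frame's contents into it here), drop one
        // reference on the shared frame, and remap the page writable.
        let new_frame = self.next_free;
        self.next_free += 1;
        *self.rc.get_mut(&frame).unwrap() -= 1;
        self.entries.insert(page, (new_frame, true));
        true
    }
}

fn main() {
    let mut t = CowDemo { entries: BTreeMap::new(), rc: BTreeMap::new(), next_free: 2 };
    // After a fork-style mapping, two pages share frame 1 read-only.
    t.entries.insert(0x1000, (1, false));
    t.entries.insert(0x2000, (1, false));
    t.rc.insert(1, 2);
    assert!(t.page_fault_handler(0x1000)); // resolved by copying
    assert_eq!(t.entries[&0x1000], (2, true));
    assert_eq!(t.rc[&1], 1); // 0x2000 still shares the old frame
}

The unsafe force_unlock before the real call exists presumably because the fault can arrive while ACTIVE_TABLE is already locked by the faulting path; the toy model sidesteps locking entirely.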
