refactor fork MemorySet, introduce clone_map to MemoryHandler

toolchain_update
WangRunji 6 years ago
parent 8024401bd2
commit 88e1055eed

@@ -22,6 +22,20 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
pt.unmap(addr);
}
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
addr: VirtAddr,
attr: &MemoryAttr,
) {
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
self.map(pt, addr, attr);
pt.get_page_slice_mut(addr).copy_from_slice(&data);
});
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
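The `Vec::from` snapshot above is the core of the trick: the page is read while the source table is still active, then written back inside `with`, where the destination table is active. A self-contained analogy in plain Rust (illustrative only; `with_other` stands in for `InactivePageTable::with`):

fn with_other(f: &mut FnMut()) {
    // stand-in for InactivePageTable::with: switch address space, run f, switch back
    f();
}

fn main() {
    let src = vec![1u8, 2, 3];      // page contents visible in the source space
    let mut dst = vec![0u8; 3];     // the same page in the destination space
    let data = src.clone();         // snapshot while the source is accessible
    with_other(&mut || {
        dst.copy_from_slice(&data); // write while the destination is accessible
    });
    assert_eq!(dst, src);
}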

@@ -16,13 +16,6 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
attr.apply(entry);
}
fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
entry.set_present(true);
attr.apply(entry);
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
@@ -34,6 +27,29 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
pt.unmap(addr);
}
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
addr: VirtAddr,
attr: &MemoryAttr,
) {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
attr.apply(entry);
pt.get_page_slice_mut(addr).copy_from_slice(&data);
});
} else {
// delay map
with(&mut || self.map(pt, addr, attr));
}
}
fn handle_page_fault(&self, pt: &mut PageTable, addr: VirtAddr) -> bool {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
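The two branches mirror the two states a delay-mapped page can be in at fork time. A hedged summary (the enum is illustrative, not part of the diff):

enum CloneAction {
    // parent already faulted the page in: the child gets its own frame with
    // the data copied, since this kernel has no copy-on-write
    CopyFrame,
    // never touched: replicate only the non-present mapping; parent and
    // child will each fault it in independently later
    StayLazy,
}

fn action(parent_entry_present: bool) -> CloneAction {
    if parent_entry_present {
        CloneAction::CopyFrame
    } else {
        CloneAction::StayLazy
    }
}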

@@ -20,6 +20,16 @@ impl MemoryHandler for Linear {
pt.unmap(addr);
}
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
addr: VirtAddr,
attr: &MemoryAttr,
) {
with(&mut || self.map(pt, addr, attr));
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
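`Linear` needs no data copy at all: a linear handler maps virtual addresses at a fixed offset from physical ones, so repeating `map` in the target table leaves both tables referencing the same frames. A sketch of the address relation (sign convention assumed):

fn virt_to_phys(vaddr: usize, offset: isize) -> usize {
    // linear mapping: paddr = vaddr + offset, identical in every page table
    (vaddr as isize + offset) as usize
}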

@@ -1,23 +1,28 @@
use super::*;
// here may be an interesting part for lab
pub trait MemoryHandler: Debug + 'static {
pub trait MemoryHandler: Debug + Send + Sync + 'static {
fn box_clone(&self) -> Box<MemoryHandler>;
/// Map `addr` in the page table
/// Should set page flags here instead of in page_fault_handler
fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr);
/// Map `addr` in the page table eagerly (i.e. no delayed allocation)
/// Should set page flags here instead of in page_fault_handler
fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
// override this when pages are allocated lazily
self.map(pt, addr, attr);
}
/// Unmap `addr` in the page table
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
/// Clone map `addr` from one page table to another.
/// `pt` is the currently active page table.
/// `with` is the `InactivePageTable::with` function.
/// Call `with`, then use `pt` as the target page table inside the closure.
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
addr: VirtAddr,
attr: &MemoryAttr,
);
/// Handle a page fault on `addr`.
/// Return `true` on success, `false` on failure.
fn handle_page_fault(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
@@ -29,7 +34,7 @@ impl Clone for Box<MemoryHandler> {
}
}
pub trait FrameAllocator: Debug + Clone + 'static {
pub trait FrameAllocator: Debug + Clone + Send + Sync + 'static {
fn alloc(&self) -> Option<PhysAddr>;
fn dealloc(&self, target: PhysAddr);
}
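For context, this is how a caller is expected to drive `clone_map` (a minimal sketch with an assumed helper name, modeled on `MemorySet::clone` later in this commit):

fn clone_one_page<T: InactivePageTable>(
    handler: &MemoryHandler,
    pt: &mut PageTable, // the currently active (source) page table
    target: &T,         // the destination page table
    addr: VirtAddr,
    attr: &MemoryAttr,
) {
    // the closure passed as `with` activates `target`, runs the inner FnMut
    // (which maps the page and copies data), then restores the old table
    handler.clone_map(pt, &|f| unsafe { target.with(f) }, addr, attr);
}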

@@ -23,8 +23,6 @@ pub struct MemoryArea {
name: &'static str,
}
unsafe impl Send for MemoryArea {}
impl MemoryArea {
/*
** @brief get slice of the content in the memory area
@@ -87,31 +85,13 @@ impl MemoryArea {
let p3 = Page::of_addr(end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/*
** @brief map the memory area to the physical address in a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
/// Map all pages in the area to page table `pt`
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.map(pt, page.start_address(), &self.attr);
}
}
/*
** @brief map the memory area to the physical address in a page table eagerly
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn map_eager(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.map_eager(pt, page.start_address(), &self.attr);
}
}
/*
** @brief unmap the memory area from the physical address in a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
/// Unmap all pages in the area from page table `pt`
fn unmap(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.unmap(pt, page.start_address());
@@ -509,20 +489,29 @@ impl<T: InactivePageTable> MemorySet<T> {
None => false,
}
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {
fn clone(&self) -> Self {
let mut page_table = T::new();
pub fn clone(&mut self) -> Self {
let new_page_table = T::new();
let Self {
ref mut page_table,
ref areas,
..
} = self;
page_table.edit(|pt| {
// without CoW, we should allocate the pages eagerly
for area in self.areas.iter() {
area.map_eager(pt);
for area in areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
area.handler.clone_map(
pt,
&|f| unsafe { new_page_table.with(f) },
page.start_address(),
&area.attr,
);
}
}
});
MemorySet {
areas: self.areas.clone(),
page_table,
areas: areas.clone(),
page_table: new_page_table,
}
}
}
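A side note on the `let Self { ref mut page_table, ref areas, .. } = self;` destructuring: it splits the borrow of `self` so the closure can mutate `page_table` while still reading `areas`. A standalone illustration in plain Rust (names invented):

struct S {
    items: Vec<u32>,
    bias: u32,
}

fn demo(s: &mut S) {
    // destructure once: `items` is borrowed mutably and `bias` immutably,
    // which a closure capturing `s` as a whole could not express
    let S { ref mut items, ref bias } = *s;
    let mut push = |x: u32| items.push(x + *bias);
    push(1);
    push(2);
}

fn main() {
    let mut s = S { items: vec![], bias: 10 };
    demo(&mut s);
    assert_eq!(s.items, vec![11, 12]);
}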

@@ -14,7 +14,7 @@ use xmas_elf::{
use crate::arch::interrupt::{Context, TrapFrame};
use crate::fs::{FileHandle, FileLike, INodeExt, OpenOptions, FOLLOW_MAX_DEPTH};
use crate::memory::{ByFrame, GlobalFrameAlloc, KernelStack, MemoryAttr, MemorySet};
use crate::memory::{ByFrame, Delay, GlobalFrameAlloc, KernelStack, MemoryAttr, MemorySet};
use crate::sync::{Condvar, SpinNoIrqLock as Mutex};
use super::abi::{self, ProcInitInfo};
@@ -181,6 +181,14 @@ impl Thread {
let ustack_top = USER_STACK_OFFSET + USER_STACK_SIZE;
vm.push(
ustack_buttom,
ustack_top - PAGE_SIZE,
MemoryAttr::default().user(),
Delay::new(GlobalFrameAlloc),
"user_stack_delay",
);
// We are going to write the init info now, so map the last page eagerly.
vm.push(
ustack_top - PAGE_SIZE,
ustack_top,
MemoryAttr::default().user(),
ByFrame::new(GlobalFrameAlloc),
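Net effect of this hunk: the bulk of the user stack becomes demand-paged, and only its top page stays eagerly mapped, because the kernel writes the `ProcInitInfo` into that page before the process first runs and therefore cannot rely on the page-fault path. How the two areas tile the stack (constants assumed, not from this repo):

const PAGE_SIZE: usize = 4096;

fn main() {
    let ustack_buttom = 0x7000_0000usize;             // spelling as in the source
    let ustack_top = ustack_buttom + 8 * 1024 * 1024; // assumed 8 MiB stack
    let delay = ustack_buttom..(ustack_top - PAGE_SIZE); // Delay area
    let eager = (ustack_top - PAGE_SIZE)..ustack_top;    // ByFrame area
    assert_eq!(delay.end, eager.start); // contiguous, no gap and no overlap
}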
@@ -284,42 +292,30 @@ impl Thread {
/// Fork a new process from the current one
pub fn fork(&self, tf: &TrapFrame) -> Box<Thread> {
// Clone memory set, make a new page table
let proc = self.proc.lock();
let vm = proc.vm.clone();
let files = proc.files.clone();
let cwd = proc.cwd.clone();
drop(proc);
let parent = Some(self.proc.clone());
debug!("fork: finish clone MemorySet");
// MMU: copy data to the new space
// NoMMU: copying data has been done in `vm.clone()`
for area in vm.iter() {
let data = Vec::<u8>::from(unsafe { area.as_slice() });
unsafe { vm.with(|| area.as_slice_mut().copy_from_slice(data.as_slice())) }
}
debug!("fork: temporary copy data!");
let mut proc = self.proc.lock();
let kstack = KernelStack::new();
let vm = proc.vm.clone();
let context = unsafe { Context::new_fork(tf, kstack.top(), vm.token()) };
let new_proc = Process {
vm,
files: proc.files.clone(),
cwd: proc.cwd.clone(),
futexes: BTreeMap::default(),
pid: Pid(0),
parent: Some(self.proc.clone()),
children: Vec::new(),
threads: Vec::new(),
child_exit: Arc::new(Condvar::new()),
child_exit_code: BTreeMap::new(),
}.add_to_table();
// link to parent
proc.children.push(Arc::downgrade(&new_proc));
Box::new(Thread {
context: unsafe { Context::new_fork(tf, kstack.top(), vm.token()) },
context,
kstack,
clear_child_tid: 0,
proc: Process {
vm,
files,
cwd,
futexes: BTreeMap::default(),
pid: Pid(0),
parent,
children: Vec::new(),
threads: Vec::new(),
child_exit: Arc::new(Condvar::new()),
child_exit_code: BTreeMap::new(),
}
.add_to_table(),
proc: new_proc,
})
}
@@ -360,11 +356,6 @@ impl Process {
let self_ref = Arc::new(Mutex::new(self));
process_table.insert(pid, Arc::downgrade(&self_ref));
// link to parent
if let Some(parent) = &self_ref.lock().parent {
parent.lock().children.push(Arc::downgrade(&self_ref));
}
self_ref
}
fn get_free_fd(&self) -> usize {
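The parent-linking moved out of `add_to_table` and up into `fork` (the `proc.children.push(...)` above), presumably because `fork` now holds the parent's lock across the call to `add_to_table`: with a non-reentrant `SpinNoIrqLock`, re-locking the parent inside `add_to_table` would spin forever. A minimal reproduction of that hazard with a plain mutex (hypothetical, std Rust):

use std::sync::Mutex;

fn main() {
    let parent = Mutex::new(Vec::<u32>::new());
    let mut children = parent.lock().unwrap(); // fork holds the parent lock
    // parent.lock().unwrap(); // re-locking here, as the old add_to_table
    //                         // effectively did, would block forever
    children.push(1);          // instead, link the child under the held lock
}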
