Fix bugs and enable swap in/out and delayed frame allocation.

master
lcy1996 6 years ago
parent b1425a53f9
commit c8ea2cb0cf

@ -91,27 +91,6 @@ impl<T: PageTable> CowExt<T> {
** @retval bool whether copy-on-write happens.
*/
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
// handle page delayed allocating
{
info!("try handling delayed frame allocator");
let need_alloc ={
let entry = self.page_table.get_entry(addr);
//info!("got entry!");
!entry.present() && !entry.swapped()
};
if need_alloc{
info!("need_alloc!");
let frame = alloc_frame();
let entry = self.page_table.get_entry(addr);
entry.set_target(frame);
//let new_entry = self.page_table.map(addr, frame);
entry.set_present(true);
entry.update();
//area.get_flags().apply(new_entry); this instruction may be used when hide attr is used
info!("allocated successfully");
return true;
}
}
// below is not being used now(no shared pages)
{
let entry = self.page_table.get_entry(addr);

@ -109,12 +109,27 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
** @param pt: *mut T2 the raw pointer for the target page's inactive page table
** @param addr: VirtAddr the target page's virtual address
*/
pub fn set_swappable<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr){
pub unsafe fn set_swappable<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr){
// Destructure `self` so the closure below can capture `page_table` and
// `swap_manager` independently without re-borrowing all of `self`.
let Self {ref mut page_table, ref mut swap_manager, ref mut swapper} = self;
// SAFETY (caller contract): `pt` must point to a valid, live inactive page
// table that outlives this call.
let targetpt = &mut *(pt);
let pttoken = {
debug!("the target page table token is {:x?}", targetpt.token());
targetpt.token()
};
// Temporarily activate the target page table so `get_entry(addr)` resolves
// the address in that process's address space, not the current one.
targetpt.with(||{
let entry = page_table.get_entry(addr);
// Only a page that is already backed by a frame is registered with the
// swap manager; a not-yet-allocated (delayed) page has nothing to swap out.
if entry.present() {
let frame = Frame::new(pt as usize, addr, pttoken);
swap_manager.push(frame);
}
});
/*
let token = unsafe{
(*pt).token()
};
let frame = Frame::new(pt as usize, addr, token);
self.swap_manager.push(frame);
*/
}
/*
@ -132,11 +147,14 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
targetpt.token()
};
debug!("try to change pagetable");
let targetaddr = targetpt.with(||{
targetpt.with(||{
let token = {
let entry = page_table.get_entry(addr);
if !entry.swapped() {
swap_manager.remove(pttoken, addr);
if entry.present(){
// if the addr doesn't refer to a swapped page, a panic used to occur here
swap_manager.remove(pttoken, addr);
}
return;
}
let token = entry.target() / PAGE_SIZE;
@ -250,17 +268,46 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
Ok(())
}
/*
** @brief execute the swap process for page fault
** @brief execute the frame delayed allocate and swap process for page fault
** This function must be called whenever PageFault happens.
** @param pt: *mut T2 the raw pointer for the target page's inactive page table (exactly the current page table)
** @param pt: *mut T2 the raw pointer for the target page's inactive page table (exactly the current page table)
** @param addr: VirtAddr the virtual address of the page fault
** @param swapin: bool whether to mark the page swappable when a frame is delay-allocated for it
** @param alloc_frame: impl FnOnce() -> PhysAddr
** the page allocation function
** that allocate a page and returns physics address
** of beginning of the page
** @retval bool whether swap in happens.
*/
pub fn page_fault_handler<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
pub fn page_fault_handler<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr, swapin: bool, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
// handle page delayed allocating
{
info!("try handling delayed frame allocator");
let need_alloc ={
let entry = self.page_table.get_entry(addr);
//info!("got entry!");
!entry.present() && !entry.swapped()
};
if need_alloc{
info!("need_alloc!");
let frame = alloc_frame();
{
let entry = self.page_table.get_entry(addr);
entry.set_target(frame);
//let new_entry = self.page_table.map(addr, frame);
entry.set_present(true);
entry.update();
}
if(swapin){
unsafe {
self.set_swappable(pt, addr);
}
}
//area.get_flags().apply(new_entry); this instruction may be used when hide attr is used
info!("allocated successfully");
return true;
}
}
// now we didn't attach the cow so the present will be false when swapped(), to enable the cow some changes will be needed
if !self.page_table.get_entry(addr).swapped() {
return false;

@ -262,6 +262,10 @@ impl<T: Context, S: Scheduler> Processor_<T, S> {
&mut self.get_mut(id).context
}
/// Mutable access to the context of the process identified by `pid`.
pub fn get_context_mut(&mut self, pid: Pid) -> &mut T{
let process = self.get_mut(pid);
&mut process.context
}
/*
** @brief get pid of current process
** @param none

@ -1,5 +1,7 @@
use super::riscv::register::*;
pub use self::context::*;
use ::memory::{InactivePageTable0, memory_set_record};
use memory::MemorySet;
#[path = "context.rs"]
mod context;
@ -67,7 +69,6 @@ pub unsafe fn restore(flags: usize) {
pub extern fn rust_trap(tf: &mut TrapFrame) {
use super::riscv::register::scause::{Trap, Interrupt as I, Exception as E};
trace!("Interrupt: {:?}", tf.scause.cause());
// page should be processed here but not now
match tf.scause.cause() {
Trap::Interrupt(I::SupervisorTimer) => timer(),
Trap::Exception(E::IllegalInstruction) => illegal_inst(tf),

@ -156,6 +156,10 @@ impl ActivePageTable {
// Unmap the page
self.unmap(0xcafebabe);
}
/// Build the page-table token for the currently active table:
/// the root page-table frame number from `satp`, with bit 31 set.
pub fn token() -> usize {
let root_frame = satp::read().frame().number();
// Bit 31 is the satp MODE bit on RV32 (Sv32) — marks paging as enabled.
root_frame | (1 << 31)
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {

@ -4,6 +4,10 @@ use alloc::boxed::Box;
use arch::driver::ide;
use spin::Mutex;
use ::memory::{InactivePageTable0, memory_set_record};
use memory::MemorySet;
use process::context::memory_set_map_swappable;
// Hard link user program
#[cfg(target_arch = "riscv32")]
global_asm!(r#"
@ -48,7 +52,11 @@ pub fn shell() {
if let Ok(file) = root.borrow().lookup(name.as_str()) {
use process::*;
let len = file.borrow().read_at(0, &mut *buf).unwrap();
let pid = processor().add(Context::new_user(&buf[..len]));
let mut new_context = Context::new_user(&buf[..len]);
//memory_set_record().push_back((new_context.get_memory_set_mut() as *mut MemorySet) as usize);
let pid = processor().add(new_context);
// map swappable for the new user process's memory areas (only for the pages which have been allocated)
memory_set_map_swappable(processor().get_context_mut(pid).get_memory_set_mut());
processor().current_wait_for(pid);
} else {
println!("Program not exist");

@ -5,11 +5,12 @@ use consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack, InactivePageTable};
use ucore_memory::swap::*;
use process::{processor, PROCESSOR};
use sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
use ucore_memory::paging::Entry;
use alloc::collections::VecDeque;
pub type MemorySet = MemorySet_<InactivePageTable0>;
@ -24,7 +25,15 @@ pub type FrameAlloc = BitAlloc4K;
lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> = SpinNoIrqLock::new(FrameAlloc::default());
}
// record the user memory set for pagefault function (swap in/out and frame delayed allocate) temporarily when page fault in new_user() or fork() function
// after the process is set up we can use processor() to get the inactive page table
lazy_static! {
pub static ref MEMORY_SET_RECORD: SpinNoIrqLock<VecDeque<usize>> = SpinNoIrqLock::new(VecDeque::default());
}
pub fn memory_set_record() -> MutexGuard<'static, VecDeque<usize>, SpinNoIrq> {
MEMORY_SET_RECORD.lock()
}
lazy_static! {
static ref ACTIVE_TABLE: SpinNoIrqLock<CowExt<ActivePageTable>> = SpinNoIrqLock::new(unsafe {
@ -86,49 +95,44 @@ pub fn alloc_stack() -> Stack {
* Return true to continue, false to halt
*/
pub fn page_fault_handler(addr: usize) -> bool {
// since some page fault for frame delayed allocating may occur in the building of the process, we can't use processor() to get the real memset for it
// unless we write a mmset manager for all the memory set, which is a little bit hard.
/*
info!("come in to page fault handler.");
{
info!("1");
unsafe{ PROCESSOR.try().unwrap().force_unlock();}
let mut temp_proc = processor();
info!("2");
let mmset = temp_proc.current_context_mut().get_memory_set_mut();
let target_area = mmset.find_area(addr);
// check whether the virtual address is valid
info!("checked the valid virtual address");
match target_area {
None => {
info!("invalid virtual address: {:x?}", addr);
return false;
},
Some(area)=>{
// Handle delayed frame allocate and copy on write (the second not being used now)
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().unwrap()) {
return true;
}
},
}
}
*/
info!("start handling swap in/out page fault");
unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
unsafe {PROCESSOR.try().unwrap().force_unlock();}
// Handle delayed frame allocate and copy on write (the second not being used now)
let mut temp_proc = processor();
debug!("active page table token in pg fault is {:x?}", ActivePageTable::token());
let id = memory_set_record().iter()
.position(|x| unsafe{(*(x.clone() as *mut MemorySet)).get_page_table_mut().token() == ActivePageTable::token()});
let mut mmsets = memory_set_record();
match id {
Some(targetid) => {
debug!("get id from memroy set recorder.");
let mmset_ptr = mmsets.get(targetid);
let pt = unsafe { (*(mmset_ptr.unwrap().clone() as *mut MemorySet)).get_page_table_mut() };
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, false, || alloc_frame().unwrap()){
return true;
}
},
None => {
debug!("get pt from processor()");
let pt = temp_proc.current_context_mut().get_memory_set_mut().get_page_table_mut();
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, true, || alloc_frame().unwrap()){
return true;
}
},
};
// handle the swap in/out
//info!("start handling swap in/out page fault");
//let mut temp_proc = processor();
//let pt = temp_proc.current_context_mut().get_memory_set_mut().get_page_table_mut();
//let pt = unsafe { (*(mmset_ptr.unwrap().clone() as *mut MemorySet)).get_page_table_mut() };
// Handle copy on write (not being used now)
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().unwrap()){
info!("general page fault handle successfully!");
return true;
}
// handle the swap in/out
info!("start handling swap in/out page fault");
let mut temp_proc = processor();
let pt = temp_proc.current_context_mut().get_memory_set_mut().get_page_table_mut();
unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, || alloc_frame().unwrap()){
return true;
}
false

@ -3,7 +3,8 @@ use memory::{MemoryArea, MemoryAttr, MemorySet, active_table_swap, alloc_frame};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type}};
use core::fmt::{Debug, Error, Formatter};
use ucore_memory::{Page};
use ::memory::{InactivePageTable0};
use ::memory::{InactivePageTable0, memory_set_record};
use ucore_memory::memory_set::*;
pub struct Context {
arch: ArchContext,
@ -61,7 +62,7 @@ impl Context {
* the new user thread Context
*/
pub fn new_user(data: &[u8]) -> Self {
info!("come into new user");
debug!("come into new user");
// Parse elf
let elf = ElfFile::new(data).expect("failed to read elf");
let is32 = match elf.header.pt2 {
@ -71,7 +72,6 @@ impl Context {
assert_eq!(elf.header.pt2.type_().as_type(), header::Type::Executable, "ELF is not executable");
// User stack
info!("start building suer stack");
use consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
let (user_stack_buttom, user_stack_top) = match is32 {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
@ -79,16 +79,20 @@ impl Context {
};
// Make page table
info!("make page table!");
let mut memory_set = memory_set_from(&elf);
info!("start to push user stack to the mmset");
// add the new memory set to the recorder
let mmset_ptr = ((&mut memory_set) as * mut MemorySet) as usize;
memory_set_record().push_back(mmset_ptr);
//let id = memory_set_record().iter()
// .position(|x| unsafe { info!("current memory set record include {:x?}, {:x?}", x, (*(x.clone() as *mut MemorySet)).get_page_table_mut().token()); false });
memory_set.push(MemoryArea::new(user_stack_buttom, user_stack_top, MemoryAttr::default().user(), "user_stack"));
trace!("{:#x?}", memory_set);
let entry_addr = elf.header.pt2.entry_point() as usize;
// Temporary switch to it, in order to copy data
info!("starting copy data.");
unsafe {
memory_set.with(|| {
for ph in elf.program_iter() {
@ -98,13 +102,9 @@ impl Context {
if file_size == 0 {
return;
}
info!("file virtaddr: {:x?}, file size: {:x?}", virt_addr, file_size);
use core::slice;
info!("starting copy!");
let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
info!("target got!");
target.copy_from_slice(&data[offset..offset + file_size]);
info!("finish copy!");
}
if is32 {
unsafe {
@ -115,11 +115,12 @@ impl Context {
}
});
}
info!("ending copy data.");
//set the user Memory pages in the memory set swappable
//memory_set_map_swappable(&mut memory_set);
let id = memory_set_record().iter()
.position(|x| x.clone() == mmset_ptr).unwrap();
memory_set_record().remove(id);
Context {
arch: unsafe {
ArchContext::new_user_thread(
@ -131,9 +132,15 @@ impl Context {
/// Fork
pub fn fork(&self, tf: &TrapFrame) -> Self {
debug!("Come in to fork!");
// Clone memory set, make a new page table
let mut memory_set = self.memory_set.clone();
// add the new memory set to the recorder
debug!("fork! new page table token: {:x?}", memory_set.token());
let mmset_ptr = ((&mut memory_set) as * mut MemorySet) as usize;
memory_set_record().push_back(mmset_ptr);
// Copy data to temp space
use alloc::vec::Vec;
let datas: Vec<Vec<u8>> = memory_set.iter().map(|area| {
@ -148,9 +155,11 @@ impl Context {
}
});
}
// map the memory set swappable
memory_set_map_swappable(&mut memory_set);
// remove the raw pointer for the memory set since it is no longer needed
let id = memory_set_record().iter()
.position(|x| x.clone() == mmset_ptr).unwrap();
memory_set_record().remove(id);
Context {
arch: unsafe { ArchContext::new_fork(tf, memory_set.kstack_top(), memory_set.token()) },
memory_set,
@ -165,7 +174,16 @@ impl Context {
impl Drop for Context{
fn drop(&mut self){
// remove the new memory set to the recorder (deprecated in the latest version)
/*
let id = memory_set_record().iter()
.position(|x| unsafe{(*(x.clone() as *mut MemorySet)).token() == self.memory_set.token()});
if id.is_some(){
info!("remove id {:x?}", id.unwrap());
memory_set_record().remove(id.unwrap());
}
*/
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set} = self;
let pt = {
@ -179,8 +197,8 @@ impl Drop for Context{
}
}
}
info!("Finishing setting pages unswappable");
*/
debug!("Finishing setting pages unswappable");
}
}
@ -199,7 +217,7 @@ impl Debug for Context {
* the new memory set
*/
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
info!("come in to memory_set_from");
debug!("come in to memory_set_from");
let mut set = MemorySet::new();
for ph in elf.program_iter() {
if ph.get_type() != Ok(Type::Load) {
@ -209,7 +227,6 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),//???
};
info!("push!");
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
}
@ -229,15 +246,15 @@ fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
* @brief:
* mark the memory areas in the memory_set swappable, specifically for the user process
*/
fn memory_set_map_swappable(memory_set: &mut MemorySet){
pub fn memory_set_map_swappable(memory_set: &mut MemorySet){
let pt = unsafe {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
active_table_swap().set_swappable(pt, addr);
unsafe { active_table_swap().set_swappable(pt, addr); }
}
}
info!("Finishing setting pages swappable");
debug!("Finishing setting pages swappable");
}

@ -5,7 +5,7 @@ pub use ucore_process::processor::{*, Context as _whatever};
pub use ucore_process::scheduler::*;
pub use ucore_process::thread::*;
mod context;
pub mod context;
type Processor = Processor_<Context, StrideScheduler>;

@ -7,6 +7,8 @@ use process::*;
use thread;
use util;
use process::context::memory_set_map_swappable;
/// 系统调用入口点
///
/// 当发生系统调用中断时,中断服务例程将控制权转移到这里。
@ -61,6 +63,8 @@ fn sys_fork(tf: &TrapFrame) -> i32 {
let mut processor = processor();
let context = processor.current_context().fork(tf);
let pid = processor.add(context);
// map swappable for the forked process's memory areas (only for the pages which have been allocated)
memory_set_map_swappable(processor.get_context_mut(pid).get_memory_set_mut());
info!("fork: {} -> {}", processor.current_pid(), pid);
pid as i32
}

Loading…
Cancel
Save