Enlarge the physical memory and the kernel heap. Fix the bug in mapping the kernel.

master
lcy1996 6 years ago
parent 990ce51007
commit 5ee44588e0

@@ -190,7 +190,7 @@ impl MemoryArea {
}
info!("finish map delayed!");
}
}
};
}
/*
** @brief unmap the memory area from the physical address in a page table
@@ -410,6 +410,7 @@ impl<T: InactivePageTable> Clone for MemorySet<T> {
area.map::<T>(pt);
}
});
info!("finish map in clone!");
MemorySet {
areas: self.areas.clone(),
page_table,

@@ -40,7 +40,7 @@ pub fn current() -> Thread {
/// Puts the current thread to sleep for the specified amount of time.
pub fn sleep(dur: Duration) {
let time = dur_to_ticks(dur);
info!("sleep: {:?} ticks", time);
trace!("sleep: {:?} ticks", time);
processor().manager().sleep(current().id(), time);
park();
@@ -58,7 +58,7 @@ pub fn spawn<F, T>(f: F) -> JoinHandle<T>
F: Send + 'static + FnOnce() -> T,
T: Send + 'static,
{
info!("spawn:");
trace!("spawn:");
// Note the following problem:
// Processor can only create a new thread from an entry address `entry` plus an argument `arg`
@@ -108,13 +108,13 @@ pub fn spawn<F, T>(f: F) -> JoinHandle<T>
/// Cooperatively gives up a timeslice to the OS scheduler.
pub fn yield_now() {
info!("yield:");
trace!("yield:");
processor().yield_now();
}
/// Blocks unless or until the current thread's token is made available.
pub fn park() {
info!("park:");
trace!("park:");
processor().manager().sleep(current().id(), 0);
processor().yield_now();
}

@@ -5,10 +5,12 @@ const P2_MASK: usize = 0x3ff << 22;
pub const RECURSIVE_INDEX: usize = 0x3fe;
pub const KERNEL_OFFSET: usize = 0;
pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;
pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000; //deprecated now
//pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
pub const KERNEL_HEAP_SIZE: usize = 0x00a0_0000;
pub const MEMORY_OFFSET: usize = 0x8000_0000;
pub const MEMORY_END: usize = 0x8080_0000;
//pub const MEMORY_END: usize = 0x8080_0000; // for thinpad; not enough now
pub const MEMORY_END: usize = 0x8100_0000;
pub const USER_STACK_OFFSET: usize = 0x70000000;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
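
For quick reference, the new values give a 10 MiB kernel heap and 16 MiB of usable physical memory, up from 2 MiB and 8 MiB. A minimal standalone sketch (not kernel code) of the arithmetic:

```rust
// Checks the sizes implied by the new constants.
const KERNEL_HEAP_SIZE: usize = 0x00a0_0000; // was 0x0020_0000 (2 MiB)
const MEMORY_OFFSET: usize = 0x8000_0000;
const MEMORY_END: usize = 0x8100_0000;       // was 0x8080_0000 (8 MiB of RAM)

fn main() {
    assert_eq!(KERNEL_HEAP_SIZE >> 20, 10);             // 10 MiB heap
    assert_eq!((MEMORY_END - MEMORY_OFFSET) >> 20, 16); // 16 MiB of RAM
    println!("heap: {} MiB, physical memory: {} MiB",
             KERNEL_HEAP_SIZE >> 20,
             (MEMORY_END - MEMORY_OFFSET) >> 20);
}
```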

@@ -31,9 +31,10 @@ fn init_frame_allocator() {
use consts::{MEMORY_OFFSET, MEMORY_END};
let mut ba = FRAME_ALLOCATOR.lock();
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
//use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
// keep memory for the kernel heap and set other physical memory available in BitAlloc
ba.insert(to_range(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MEMORY_END));
//ba.insert(to_range(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MEMORY_END));
ba.insert(to_range(end as usize + PAGE_SIZE, MEMORY_END));
info!("FrameAllocator init end");
/*
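
With this change the frame allocator frees everything from just past the kernel image (the linker-provided `end` symbol) up to MEMORY_END, instead of starting after a fixed heap window. A rough sketch of what `to_range` is assumed to do here; its real definition lives elsewhere in the crate, and the image-end address below is made up for illustration:

```rust
use core::ops::Range;

const MEMORY_OFFSET: usize = 0x8000_0000;
const PAGE_SIZE: usize = 0x1000;

// Assumed behaviour: translate a physical address range into page-frame
// indices (relative to MEMORY_OFFSET) that BitAlloc can mark as free.
fn to_range(start: usize, end: usize) -> Range<usize> {
    let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
    let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
    page_start..page_end
}

fn main() {
    // Hypothetical kernel image ending at 0x8040_0000; the page after it,
    // up to MEMORY_END, becomes allocatable.
    let free = to_range(0x8040_0000 + PAGE_SIZE, 0x8100_0000);
    println!("free frames {:?} ({} frames)", free, free.end - free.start);
}
```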

@@ -28,6 +28,7 @@ pub fn setup_page_table(frame: Frame) {
// 0x80000000 ~ 12M area
p2.map_identity(KERNEL_P2_INDEX, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 2, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
use super::riscv::register::satp;
unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
@@ -356,10 +357,18 @@ impl InactivePageTable0 {
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
assert!(!e1.is_unused());
// for larger heap memory
let e2 = table[KERNEL_P2_INDEX + 1];
assert!(!e2.is_unused());
let e3 = table[KERNEL_P2_INDEX + 2];
assert!(!e3.is_unused());
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
// for larger heap memory
table[KERNEL_P2_INDEX + 1].set(e2.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
}
}
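
This is the kernel-mapping fix: under Sv32 each P2 (root) entry covers a 4 MiB megapage, and with a 10 MiB heap the kernel no longer fits in the two megapages mapped before, hence the extra `map_identity` call and the extra shared entries in `map_kernel`. A back-of-the-envelope check, assuming the image ends near the old KERNEL_HEAP_OFFSET:

```rust
const MEGAPAGE: usize = 4 << 20; // one Sv32 root (P2) entry maps 4 MiB
const KERNEL_BASE: usize = 0x8000_0000;
// Assumption for the estimate: the image ends around the old
// KERNEL_HEAP_OFFSET, followed by the enlarged heap.
const APPROX_KERNEL_END: usize = 0x8020_0000 + 0x00a0_0000;

fn main() {
    let entries = (APPROX_KERNEL_END - KERNEL_BASE + MEGAPAGE - 1) / MEGAPAGE;
    assert_eq!(entries, 3); // matches KERNEL_P2_INDEX, +1, +2 above
    println!("kernel + heap need {} second-level entries", entries);
}
```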

@@ -20,6 +20,7 @@ _binary_user_riscv_img_end:
pub fn shell() {
// load riscv32/x86_64 user program
info!("come into shell!");
#[cfg(target_arch = "riscv32")]
let device = {
extern {

@@ -63,6 +63,7 @@ pub mod arch;
pub mod arch;
pub fn kmain() -> ! {
info!("Come into kmain");
process::processor().run();
// thread::test::local_key();

@@ -106,11 +106,6 @@ impl ContextImpl {
let kstack = KernelStack::new();
// map the memory set swappable
//memory_set_map_swappable(&mut memory_set);
//set the user Memory pages in the memory set swappable
//memory_set_map_swappable(&mut memory_set);
let id = memory_set_record().iter()
.position(|x| x.clone() == mmset_ptr).unwrap();
memory_set_record().remove(id);
@@ -123,27 +118,32 @@ impl ContextImpl {
memory_set,
kstack,
});
//set the user Memory pages in the memory set swappable
memory_set_map_swappable(ret.get_memory_set_mut());
ret
}
/// Fork
pub fn fork(&self, tf: &TrapFrame) -> Box<Context> {
info!("COME into fork!");
// Clone memory set, make a new page table
let mut memory_set = self.memory_set.clone();
info!("finish mmset clone in fork!");
// add the new memory set to the recorder
debug!("fork! new page table token: {:x?}", memory_set.token());
let mmset_ptr = ((&mut memory_set) as * mut MemorySet) as usize;
memory_set_record().push_back(mmset_ptr);
info!("before copy data to temp space");
// Copy data to temp space
use alloc::vec::Vec;
let datas: Vec<Vec<u8>> = memory_set.iter().map(|area| {
Vec::from(unsafe { area.as_slice() })
}).collect();
// Temporary switch to it, in order to copy data
info!("Finish copy data to temp space.");
// Temporarily switch to it, in order to copy data
unsafe {
memory_set.with(|| {
for (area, data) in memory_set.iter().zip(datas.iter()) {
@@ -152,10 +152,9 @@ impl ContextImpl {
});
}
info!("temporary copy data!");
let kstack = KernelStack::new();
// map the memory set swappable
//memory_set_map_swappable(&mut memory_set);
// remove the raw pointer for the memory set, since it will become invalid once memory_set is moved into the new context
let id = memory_set_record().iter()
.position(|x| x.clone() == mmset_ptr).unwrap();
@@ -166,7 +165,9 @@ impl ContextImpl {
memory_set,
kstack,
});
memory_set_map_swappable(ret.get_memory_set_mut());
info!("FORK() finsihed!");
ret
}
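
The copy step in `fork()` snapshots every area into temporary buffers and then writes them back while the child's page table is active (inside `memory_set.with()`). A minimal illustration of that pattern, with plain `Vec`s standing in for the kernel's memory areas:

```rust
fn main() {
    // "Parent" address space: two areas of bytes.
    let parent: Vec<Vec<u8>> = vec![vec![1, 2, 3], vec![4, 5]];

    // Snapshot each area into a temporary buffer, as fork() does with
    // `area.as_slice()` before switching page tables.
    let datas: Vec<Vec<u8>> = parent.iter().cloned().collect();

    // "Child" address space with the same layout; in the kernel the copy
    // happens with the child's page table active.
    let mut child: Vec<Vec<u8>> = parent.iter().map(|a| vec![0u8; a.len()]).collect();
    for (area, data) in child.iter_mut().zip(datas.iter()) {
        area.copy_from_slice(data);
    }
    assert_eq!(parent, child);
    println!("copied {} areas", child.len());
}
```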
@@ -187,7 +188,7 @@ impl Drop for ContextImpl{
memory_set_record().remove(id.unwrap());
}
*/
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set, ref mut kstack} = self;
let pt = {
@@ -202,7 +203,6 @@ impl Drop for ContextImpl{
}
}
debug!("Finishing setting pages unswappable");
}
}
