HUGE REFACTOR for memory mod: unified to `MemorySet`!

toolchain_update
WangRunji 7 years ago
parent b715cecf8d
commit 2e9ffb84fa

@@ -8,15 +8,14 @@ pub mod keyboard;
pub mod pit;
pub mod ide;
pub fn init(mut page_map: impl FnMut(usize)) -> acpi::AcpiResult {
pub fn init(mut page_map: impl FnMut(usize, usize)) -> acpi::AcpiResult {
assert_has_not_been_called!();
// TODO Handle this temp page map.
page_map(0); // EBDA
for addr in (0xE0000 .. 0x100000).step_by(0x1000) {
page_map(addr);
}
page_map(0x7fe1000); // RSDT
page_map(0, 1); // EBDA
page_map(0xe0_000, 0x100 - 0xe0);
page_map(0x07fe1000, 1); // RSDT
page_map(0xfee00000, 1); // LAPIC
page_map(0xfec00000, 1); // IOAPIC
let acpi = acpi::init().expect("Failed to init ACPI");
assert_eq!(acpi.lapic_addr as usize, 0xfee00000);
@@ -24,10 +23,6 @@ pub fn init(mut page_map: impl FnMut(usize)) -> acpi::AcpiResult {
if cfg!(feature = "use_apic") {
pic::disable();
page_map(0xfee00000); // LAPIC
page_map(0xFEC00000); // IOAPIC
apic::init(acpi.lapic_addr, acpi.ioapic_id);
} else {
pic::init();
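The `init` callback above now takes a base address plus a page count, so a region like `0xE0000..0x100000` becomes one call instead of a per-page loop. A minimal sketch, not from the commit, of how a per-page mapper adapts to the new `(addr, count)` shape, assuming the 4 KiB pages used throughout this diff:

```rust
// Hypothetical adapter for illustration: lift a per-page mapper into the
// (addr, count) signature the new `init` expects.
const PAGE_SIZE: usize = 0x1000;

fn per_page(map_one: impl FnMut(usize)) -> impl FnMut(usize, usize) {
    let mut map_one = map_one;
    move |addr, count| {
        for i in 0..count {
            map_one(addr + i * PAGE_SIZE);
        }
    }
}
```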

@@ -144,7 +144,10 @@ impl ActivePageTable {
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86_64::PhysicalAddress;
use x86_64::registers::control_regs;
debug!("switch table to {:?}", new_table.p4_frame);
debug!("switch table {:?} -> {:?}", Frame::of_addr(control_regs::cr3().0 as usize), new_table.p4_frame);
if new_table.p4_frame.start_address() == control_regs::cr3() {
return new_table;
}
let old_table = InactivePageTable {
p4_frame: Frame::of_addr(control_regs::cr3().0 as usize),
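The new early return makes re-activating the current address space a no-op: without it, `switch` would reload CR3 needlessly and hand back a second `InactivePageTable` owning the very frame that is still active. The shape of the guard, with toy types rather than the kernel's:

```rust
// Toy model of the guard above (usize stands in for a page-table frame).
fn switch(cr3_frame: usize, new_frame: usize) -> usize {
    if new_frame == cr3_frame {
        return new_frame; // already active: hand the table straight back
    }
    // ...reload CR3 with new_frame here...
    cr3_frame             // otherwise return the previously active table
}
```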

@@ -1,6 +1,7 @@
use arch::driver::{acpi::AcpiResult, apic::start_ap};
use memory::{MemoryController, PhysAddr};
use memory::*;
use core::ptr::{read_volatile, write_volatile};
use x86_64::registers::control_regs::cr3;
extern {
fn entryother_start(); // physical addr of entryother
@@ -9,26 +10,23 @@ extern {
const ENTRYOTHER_ADDR: u32 = 0x7000;
pub fn start_other_cores(acpi: &AcpiResult, mc: &mut MemoryController) {
mc.map_page_identity(ENTRYOTHER_ADDR as usize - 1);
mc.map_page_identity(ENTRYOTHER_ADDR as usize);
mc.map_page_identity(entryother_start as usize);
mc.map_page_p2v(PhysAddr(0));
pub fn start_other_cores(acpi: &AcpiResult, ms: &mut MemorySet) {
use consts::KERNEL_OFFSET;
ms.push(MemoryArea::new_identity(ENTRYOTHER_ADDR as usize - 1, ENTRYOTHER_ADDR as usize + 1, MemoryAttr::default(), "entry_other"));
ms.push(MemoryArea::new_identity(entryother_start as usize, entryother_start as usize + 1, MemoryAttr::default(), "entry_other"));
ms.push(MemoryArea::new_kernel(KERNEL_OFFSET, KERNEL_OFFSET + 1, MemoryAttr::default(), "entry_other3"));
copy_entryother();
let args = unsafe{ &mut *(ENTRYOTHER_ADDR as *mut EntryArgs).offset(-1) };
let page_table = unsafe{ *(0xFFFF_FFFF_FFFF_FFF8 as *const u32) } & 0xFFFF_F000;
for i in 1 .. acpi.cpu_num {
let apic_id = acpi.cpu_acpi_ids[i as usize];
let kstack = mc.alloc_stack(7).unwrap();
let kstack_top = kstack.top() as u64;
use core::mem::forget;
forget(kstack); // TODO pass this kstack to new AP
let ms = MemorySet::new(7);
*args = EntryArgs {
kstack: kstack_top,
page_table: page_table,
kstack: ms.kstack_top() as u64,
page_table: ms._page_table_addr().0 as u32,
stack: 0x8000, // just enough stack to get us to entry64mp
};
unsafe { MS = Some(ms); }
start_ap(apic_id, ENTRYOTHER_ADDR);
while unsafe { !read_volatile(&STARTED[i as usize]) } {}
}
@@ -45,6 +43,7 @@ fn copy_entryother() {
}
#[repr(C)]
#[derive(Debug)]
struct EntryArgs {
kstack: u64,
page_table: u32,
@@ -53,7 +52,9 @@ struct EntryArgs {
use consts::MAX_CPU_NUM;
static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
static mut MS: Option<MemorySet> = None;
pub unsafe fn notify_started(cpu_id: u8) {
pub unsafe fn notify_started(cpu_id: u8) -> MemorySet {
write_volatile(&mut STARTED[cpu_id as usize], true);
MS.take().unwrap()
}
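The hand-off between `start_other_cores` and `notify_started` is a one-slot rendezvous: the BSP parks the AP's `MemorySet` in the `MS` static before waking it, then spins on `STARTED[i]`; the AP flips its flag and takes the set. A standalone model of the protocol, with `u64` standing in for `MemorySet` and the array size picked arbitrarily:

```rust
// Standalone model of the MS/STARTED rendezvous above; the volatile
// accesses mirror the diff, everything else is illustrative.
use core::ptr::{read_volatile, write_volatile};

static mut SLOT: Option<u64> = None;        // stands in for MS
static mut STARTED: [bool; 4] = [false; 4];

unsafe fn bsp_side(cpu: usize, payload: u64) {
    SLOT = Some(payload);                   // publish before start_ap()
    while !read_volatile(&STARTED[cpu]) {}  // wait for the AP's ack
}

unsafe fn ap_side(cpu: usize) -> u64 {
    write_volatile(&mut STARTED[cpu], true); // ack, as notify_started does
    SLOT.take().unwrap()                     // take ownership of the set
}
```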

@@ -70,7 +70,7 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
// set up guard page and map the heap pages
let mut memory_controller = memory::init(boot_info);
let mut kernel_memory = memory::init(boot_info);
arch::gdt::init();
@@ -79,12 +79,14 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
test!(guard_page);
test!(find_mp);
let acpi = arch::driver::init(
|addr: usize| memory_controller.map_page_identity(addr));
// memory_controller.print_page_table();
use memory::*;
let acpi = arch::driver::init(|addr: usize, count: usize| {
kernel_memory.push(MemoryArea::new_identity(addr, addr + count * 0x1000, MemoryAttr::default(), "acpi"))
});
arch::smp::start_other_cores(&acpi, &mut memory_controller);
process::init(memory_controller);
// FIXME: PageFault when SMP
// arch::smp::start_other_cores(&acpi, &mut kernel_memory);
process::init(kernel_memory);
fs::load_sfs();
@@ -127,7 +129,7 @@ pub extern "C" fn other_main() -> ! {
arch::driver::apic::other_init();
let cpu_id = arch::driver::apic::lapic_id();
println!("Hello world! from CPU {}!", arch::driver::apic::lapic_id());
unsafe{ arch::smp::notify_started(cpu_id); }
let ms = unsafe { arch::smp::notify_started(cpu_id) };
// unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
loop {}
}

@@ -9,38 +9,38 @@ pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
phys_start_addr: Option<PhysAddr>,
flags: u64,
flags: MemoryAttr,
name: &'static str,
}
impl MemoryArea {
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: EntryFlags, name: &'static str) -> Self {
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea {
start_addr,
end_addr,
phys_start_addr: None,
flags: flags.bits(),
flags,
name,
}
}
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: EntryFlags, name: &'static str) -> Self {
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea {
start_addr,
end_addr,
phys_start_addr: Some(PhysAddr(start_addr as u64)),
flags: flags.bits(),
flags,
name,
}
}
pub fn new_kernel(start_addr: VirtAddr, end_addr: VirtAddr, flags: EntryFlags, name: &'static str) -> Self {
pub fn new_kernel(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea {
start_addr,
end_addr,
phys_start_addr: Some(PhysAddr::from_kernel_virtual(start_addr)),
flags: flags.bits(),
flags,
name,
}
}
@@ -62,21 +62,70 @@ impl MemoryArea {
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
fn map(&self, pt: &mut Mapper) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let frame = Frame::of_addr(phys_start.get() + page.start_address() - self.start_addr);
pt.map_to(page, frame.clone(), self.flags.0);
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
pt.map(page, self.flags.0);
}
}
}
}
fn unmap(&self, pt: &mut Mapper) {
for page in Page::range_of(self.start_addr, self.end_addr) {
pt.unmap(page);
}
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct MemoryAttr(EntryFlags);
impl Default for MemoryAttr {
fn default() -> Self {
MemoryAttr(EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE)
}
}
impl MemoryAttr {
pub fn user(mut self) -> Self {
self.0 |= EntryFlags::USER_ACCESSIBLE;
self
}
pub fn readonly(mut self) -> Self {
self.0.remove(EntryFlags::WRITABLE);
self
}
pub fn execute(mut self) -> Self {
self.0.remove(EntryFlags::NO_EXECUTE);
self
}
pub fn hide(mut self) -> Self {
self.0.remove(EntryFlags::PRESENT);
self
}
}
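`MemoryAttr` hides raw `EntryFlags` behind a builder whose default is a safe mapping (present, writable, no-execute, kernel-only); anything looser is opt-in. The combinations used elsewhere in this commit:

```rust
// Builder usage as it appears across this commit (MemoryAttr is above).
let heap     = MemoryAttr::default();                      // kernel RW, NX
let ustack   = MemoryAttr::default().user();               // + user access
let text     = MemoryAttr::default().readonly().execute(); // R-X
let unloaded = MemoryAttr::default().hide();               // not PRESENT
```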
/// A set of memory spaces, consisting of several contiguous areas
/// Corresponds to `mm_struct` in ucore
#[derive(Clone)]
pub struct MemorySet {
areas: Vec<MemoryArea>,
// page_table: Option<InactivePageTable>,
page_table: InactivePageTable,
kstack: Option<Stack>,
}
impl MemorySet {
pub fn new() -> Self {
pub fn new(stack_size_in_pages: usize) -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
// page_table: None,
page_table: new_page_table_with_kernel(),
kstack: Some(alloc_stack(stack_size_in_pages)),
}
}
/// Used for remap_kernel() where heap alloc is unavailable
@@ -85,7 +134,8 @@ impl MemorySet {
let cap = slice.len() / size_of::<MemoryArea>();
MemorySet {
areas: Vec::<MemoryArea>::from_raw_parts(slice.as_ptr() as *mut MemoryArea, 0, cap),
// page_table: None,
page_table: new_page_table(),
kstack: None,
}
}
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
@@ -93,36 +143,65 @@
}
pub fn push(&mut self, area: MemoryArea) {
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
active_table().with(&mut self.page_table, |mapper| area.map(mapper));
self.areas.push(area);
}
pub fn map(&self, pt: &mut Mapper) {
for area in self.areas.iter() {
match area.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(area.start_addr, area.end_addr) {
let frame = Frame::of_addr(phys_start.get() + page.start_address() - area.start_addr);
pt.map_to(page, frame.clone(), EntryFlags::from_bits(area.flags.into()).unwrap());
}
},
None => {
for page in Page::range_of(area.start_addr, area.end_addr) {
pt.map(page, EntryFlags::from_bits(area.flags.into()).unwrap());
}
},
}
}
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn unmap(&self, pt: &mut Mapper) {
for area in self.areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
pt.unmap(page);
pub fn with(&self, mut f: impl FnMut()) {
use core::{ptr, mem};
let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
let mut active_table = active_table();
let backup = active_table.switch(page_table);
f();
mem::forget(active_table.switch(backup));
}
pub fn switch(&self) {
use core::{ptr, mem};
let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
mem::forget(active_table().switch(page_table));
}
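`with` and `switch` take `&self`, but `ActivePageTable::switch` consumes an `InactivePageTable` by value, hence the `ptr::read`/`mem::forget` pair: the handle is duplicated bitwise out of the shared reference, and the handle that re-emerges from a switch is forgotten, so exactly one owner's `Drop` ever runs for the P4 frame. The pattern in isolation, with a toy type:

```rust
// The read/forget ownership dodge in isolation (toy type; imagine that
// Drop frees the page-table frame).
use core::{mem, ptr};

struct Handle(u32);

fn use_by_value(h: &Handle) {
    // Bitwise duplicate: briefly there are two owners of one resource.
    let dup = unsafe { ptr::read(h) };
    // `dup` would be consumed by switch() and an equivalent handle
    // returned; forgetting that handle leaves `h` as the sole owner.
    mem::forget(dup);
}
```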
pub fn set_kstack(&mut self, stack: Stack) {
assert!(self.kstack.is_none());
self.kstack = Some(stack);
}
pub fn kstack_top(&self) -> usize {
self.kstack.as_ref().unwrap().top()
}
pub fn clone(&self, stack_size_in_pages: usize) -> Self {
let mut page_table = new_page_table_with_kernel();
active_table().with(&mut page_table, |mapper| {
for area in self.areas.iter() {
area.map(mapper);
}
});
MemorySet {
areas: self.areas.clone(),
page_table,
kstack: Some(alloc_stack(stack_size_in_pages)),
}
}
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
/// Only for SMP
pub fn _page_table_addr(&self) -> PhysAddr {
use core::mem;
unsafe { mem::transmute_copy::<_, Frame>(&self.page_table) }.start_address()
}
}
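`clone(stack_size_in_pages)` is the fork building block: it creates a fresh kernel-seeded page table, replays every area's mappings into it, and allocates a new kernel stack, but copies no frame contents; `Process::fork` later in this commit does the data copy under `with`. A usage sketch (the helper name is hypothetical):

```rust
// Fork-shaped usage sketch; cf. Process::fork later in this diff.
fn duplicate_address_space(parent: &MemorySet) -> MemorySet {
    let child = parent.clone(7); // new page table + 7-page kernel stack
    child.with(|| {
        // child's table is active here: copy user data into its frames
    });
    child
}
```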
impl Drop for MemorySet {
fn drop(&mut self) {
debug!("MemorySet dropping");
let Self { ref mut page_table, ref areas, .. } = self;
active_table().with(page_table, |mapper| {
for area in areas.iter() {
area.unmap(mapper);
}
})
}
}
@@ -134,44 +213,25 @@ impl Debug for MemorySet {
}
}
#[cfg(test)]
mod test {
use super::*;
fn new_page_table() -> InactivePageTable {
let frame = alloc_frame();
let mut active_table = active_table();
InactivePageTable::new(frame, &mut active_table)
}
#[test]
fn push_and_find() {
let mut ms = MemorySet::new();
ms.push(MemoryArea {
start_addr: 0x0,
end_addr: 0x8,
flags: 0x0,
name: "code",
});
ms.push(MemoryArea {
start_addr: 0x8,
end_addr: 0x10,
flags: 0x1,
name: "data",
});
assert_eq!(ms.find_area(0x6).unwrap().name, "code");
assert_eq!(ms.find_area(0x11), None);
}
fn new_page_table_with_kernel() -> InactivePageTable {
let frame = alloc_frame();
let mut active_table = active_table();
let mut page_table = InactivePageTable::new(frame, &mut active_table);
#[test]
#[should_panic]
fn push_overlap() {
let mut ms = MemorySet::new();
ms.push(MemoryArea {
start_addr: 0x0,
end_addr: 0x8,
flags: 0x0,
name: "code",
});
ms.push(MemoryArea {
start_addr: 0x4,
end_addr: 0x10,
flags: 0x1,
name: "data",
});
}
use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
let e510 = active_table.p4()[KERNEL_PML4].clone();
let e509 = active_table.p4()[KERNEL_HEAP_PML4].clone();
active_table.with(&mut page_table, |pt: &mut Mapper| {
pt.p4_mut()[KERNEL_PML4] = e510;
pt.p4_mut()[KERNEL_HEAP_PML4] = e509;
pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::WRITABLE); // LAPIC
});
page_table
}
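`new_page_table_with_kernel` seeds each new address space by copying the `KERNEL_PML4` and `KERNEL_HEAP_PML4` entries from the live table (the `e510`/`e509` names suggest slots 510 and 509) and identity-mapping the LAPIC page. Sharing a top-level entry shares everything beneath it, so kernel mappings installed in one address space appear in all of them. The arithmetic, for reference:

```rust
// One PML4 slot spans 2^39 bytes (512 GiB) on x86_64, selected by bits
// 39..48 of the virtual address; aliasing a slot aliases its whole span.
const PML4_SPAN: u64 = 1 << 39;

fn pml4_index(vaddr: u64) -> usize {
    ((vaddr >> 39) & 0o777) as usize // 0..512
}
```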

@@ -1,6 +1,6 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
pub use arch::paging::*;
pub use self::stack_allocator::Stack;
pub use self::stack_allocator::*;
pub use self::address::*;
pub use self::frame::*;
pub use self::memory_set::*;
@@ -8,7 +8,7 @@ pub use self::memory_set::*;
use multiboot2::BootInformation;
use consts::KERNEL_OFFSET;
use arch::paging;
use spin::Mutex;
use spin::{Mutex, MutexGuard};
use super::HEAP_ALLOCATOR;
mod memory_set;
@@ -19,6 +19,7 @@ mod address;
mod frame;
pub static FRAME_ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
pub static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
pub fn alloc_frame() -> Frame {
FRAME_ALLOCATOR.lock()
@@ -26,18 +27,49 @@ pub fn alloc_frame() -> Frame {
.allocate_frame().expect("no more frame")
}
fn alloc_stack(size_in_pages: usize) -> Stack {
let mut active_table = active_table();
STACK_ALLOCATOR.lock()
.as_mut().expect("stack allocator is not initialized")
.alloc_stack(&mut active_table, size_in_pages).expect("no more stack")
}
/// The only way to get active page table
fn active_table() -> MutexGuard<'static, ActivePageTable> {
static ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe { ActivePageTable::new() });
ACTIVE_TABLE.lock()
}
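`active_table()` replaces the scattered `unsafe { ActivePageTable::new() }` constructions with one lock-guarded instance, so page-table mutation is serialized. The flip side is a deadlock hazard worth noting: the guard must be dropped before anything that calls `active_table()` again. A usage sketch, assuming some `page: Page` in scope:

```rust
// Keep the guard's scope tight: a nested active_table() call while it
// lives would spin forever on the same Mutex.
{
    let mut table = active_table(); // MutexGuard<'static, ActivePageTable>
    table.unmap(page);
}                                   // guard dropped, lock released
```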
// Return true to continue, false to halt
pub fn page_fault_handler(addr: VirtAddr) -> bool {
// Handle copy on write
let mut page_table = unsafe { ActivePageTable::new() };
page_table.try_copy_on_write(addr)
active_table().try_copy_on_write(addr)
}
pub fn init(boot_info: BootInformation) -> MemoryController {
pub fn init(boot_info: BootInformation) -> MemorySet {
assert_has_not_been_called!("memory::init must be called only once");
info!("{:?}", boot_info);
init_frame_allocator(&boot_info);
let kernel_memory = remap_the_kernel(boot_info);
use self::paging::Page;
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
*STACK_ALLOCATOR.lock() = Some({
let stack_alloc_range = Page::range_of(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE,
KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE + 0x1000000);
stack_allocator::StackAllocator::new(stack_alloc_range)
});
kernel_memory
}
fn init_frame_allocator(boot_info: &BootInformation) {
let memory_map_tag = boot_info.memory_map_tag().expect(
"Memory map tag required");
let elf_sections_tag = boot_info.elf_sections_tag().expect(
@@ -54,53 +86,40 @@ pub fn init(boot_info: BootInformation) -> MemoryController {
*FRAME_ALLOCATOR.lock() = Some(AreaFrameAllocator::new(
kernel_start, kernel_end,
boot_info_start, boot_info_end,
memory_map_tag.memory_areas()
memory_map_tag.memory_areas(),
));
let kernel_stack = remap_the_kernel(boot_info);
use self::paging::Page;
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
let stack_allocator = {
let stack_alloc_range = Page::range_of(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE,
KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE + 0x1000000);
stack_allocator::StackAllocator::new(stack_alloc_range)
};
MemoryController {
kernel_stack: Some(kernel_stack),
active_table: unsafe { ActivePageTable::new() },
stack_allocator,
}
}
pub fn remap_the_kernel(boot_info: BootInformation) -> Stack {
let mut active_table = unsafe { ActivePageTable::new() };
fn remap_the_kernel(boot_info: BootInformation) -> MemorySet {
let mut memory_set = MemorySet::from(boot_info.elf_sections_tag().unwrap());
use consts::{KERNEL_OFFSET, KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xb8000, KERNEL_OFFSET + 0xb9000, EntryFlags::WRITABLE, "VGA"));
memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, EntryFlags::WRITABLE, "kernel_heap"));
memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xb8000, KERNEL_OFFSET + 0xb9000, MemoryAttr::default(), "VGA"));
memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
let mut page_table = InactivePageTable::new(alloc_frame(), &mut active_table);
active_table.with(&mut page_table, |pt| memory_set.map(pt));
debug!("{:#x?}", memory_set);
let old_table = active_table.switch(page_table);
memory_set.switch();
info!("NEW TABLE!!!");
// turn the stack bottom into a guard page
let kstack = get_init_kstack_and_set_guard_page();
memory_set.set_kstack(kstack);
memory_set
}
fn get_init_kstack_and_set_guard_page() -> Stack {
assert_has_not_been_called!();
extern { fn stack_bottom(); }
let stack_bottom = PhysAddr(stack_bottom as u64).to_kernel_virtual();
let stack_bottom_page = Page::of_addr(stack_bottom);
active_table.unmap(stack_bottom_page);
let kernel_stack = Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE);
// turn the stack bottom into a guard page
active_table().unmap(stack_bottom_page);
debug!("guard page at {:#x}", stack_bottom_page.start_address());
kernel_stack
Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
}
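`remap_the_kernel` ends by unmapping the lowest page of the boot stack, so overflowing the kernel stack faults immediately instead of silently corrupting whatever lies below. The resulting layout, per the arithmetic above (the 8-page reservation in the boot code is assumed):

```rust
// Boot-stack layout after get_init_kstack_and_set_guard_page():
//
//   stack_bottom + 8 * PAGE_SIZE  <- Stack::top (initial RSP)
//   ...                              7 usable pages
//   stack_bottom + 1 * PAGE_SIZE  <- Stack::bottom
//   stack_bottom                  <- guard page, unmapped: overflow faults
```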
use multiboot2::{ElfSectionsTag, ElfSection, ElfSectionFlags};
@@ -111,11 +130,7 @@ impl From<ElfSectionsTag> for MemorySet {
// WARNING: must ensure it's large enough
static mut SPACE: [u8; 0x1000] = [0; 0x1000];
let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE) };
for section in sections.sections() {
if !section.is_allocated() {
// section is not loaded to memory
continue;
}
for section in sections.sections().filter(|s| s.is_allocated()) {
set.push(MemoryArea::from(section));
}
set
@@ -133,76 +148,17 @@ impl From<ElfSection> for MemoryArea {
start_addr += KERNEL_OFFSET;
end_addr += KERNEL_OFFSET;
}
MemoryArea::new_kernel(start_addr, end_addr, EntryFlags::from(section.flags()), name)
MemoryArea::new_kernel(start_addr, end_addr, MemoryAttr::from(section.flags()), name)
}
}
impl From<ElfSectionFlags> for EntryFlags {
impl From<ElfSectionFlags> for MemoryAttr {
fn from(elf_flags: ElfSectionFlags) -> Self {
let mut flags = EntryFlags::empty();
let mut flags = MemoryAttr::default();
if elf_flags.contains(ElfSectionFlags::ALLOCATED) {
// section is loaded to memory
flags = flags | EntryFlags::PRESENT;
}
if elf_flags.contains(ElfSectionFlags::WRITABLE) {
flags = flags | EntryFlags::WRITABLE;
}
if !elf_flags.contains(ElfSectionFlags::EXECUTABLE) {
flags = flags | EntryFlags::NO_EXECUTE;
}
if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
flags
}
}
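The flag conversion is now subtractive: start from the safe default and peel attributes back. A worked example for a typical `.text` section (`ALLOCATED | EXECUTABLE`, not `WRITABLE`), which lands on present, read-only, executable:

```rust
// Worked example of the From impl above; MemoryAttr derives PartialEq,
// so the equivalence can be checked directly.
let text = MemoryAttr::from(ElfSectionFlags::ALLOCATED | ElfSectionFlags::EXECUTABLE);
assert_eq!(text, MemoryAttr::default().readonly().execute());
```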
pub struct MemoryController {
pub kernel_stack: Option<Stack>,
active_table: paging::ActivePageTable,
stack_allocator: stack_allocator::StackAllocator,
}
impl MemoryController {
pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option<Stack> {
let &mut MemoryController { ref mut kernel_stack,
ref mut active_table,
ref mut stack_allocator } = self;
stack_allocator.alloc_stack(active_table, size_in_pages)
}
pub fn new_page_table(&mut self) -> InactivePageTable {
let frame = alloc_frame();
let page_table = InactivePageTable::new(frame, &mut self.active_table);
page_table
}
pub fn map_page_identity(&mut self, addr: usize) {
let frame = Frame::of_addr(addr);
let flags = EntryFlags::WRITABLE;
self.active_table.identity_map(frame, flags);
}
pub fn map_page_p2v(&mut self, addr: PhysAddr) {
let page = Page::of_addr(addr.to_kernel_virtual());
let frame = Frame::of_addr(addr.get());
let flags = EntryFlags::WRITABLE;
self.active_table.map_to(page, frame, flags);
}
pub fn make_page_table(&mut self, set: &MemorySet) -> InactivePageTable {
let mut page_table = InactivePageTable::new(alloc_frame(), &mut self.active_table);
use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
let e510 = self.active_table.p4()[KERNEL_PML4].clone();
let e509 = self.active_table.p4()[KERNEL_HEAP_PML4].clone();
self.active_table.with(&mut page_table, |pt: &mut Mapper| {
set.map(pt);
pt.p4_mut()[KERNEL_PML4] = e510;
pt.p4_mut()[KERNEL_HEAP_PML4] = e509;
pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::WRITABLE); // LAPIC
});
page_table
}
pub fn with(&mut self, page_table: InactivePageTable, mut f: impl FnMut()) -> InactivePageTable {
let backup = self.active_table.switch(page_table);
f();
self.active_table.switch(backup)
}
}

@@ -73,13 +73,6 @@ impl Stack {
pub fn bottom(&self) -> usize {
self.bottom
}
/// Push a value of type `T` at the top of the stack, return the rsp after.
pub fn push_at_top<T>(&self, value: T) -> usize {
let ptr = unsafe { (self.top as *mut T).offset(-1) };
unsafe { *ptr = value; }
ptr as usize
}
}
impl Drop for Stack {

@@ -1,4 +1,4 @@
use memory::MemoryController;
use memory::MemorySet;
use spin::Once;
use sync::SpinNoIrqLock;
use core::slice;
@@ -12,21 +12,19 @@ mod processor;
mod scheduler;
pub fn init(mut mc: MemoryController) {
pub fn init(mut ms: MemorySet) {
PROCESSOR.call_once(|| {
SpinNoIrqLock::new({
let initproc = Process::new_init(&mut mc);
let idleproc = Process::new("idle", idle_thread, 0, &mut mc);
let initproc = Process::new_init(ms);
let idleproc = Process::new("idle", idle_thread, 0);
let mut processor = Processor::new();
processor.add(initproc);
processor.add(idleproc);
processor
})});
MC.call_once(|| SpinNoIrqLock::new(mc));
}
pub static PROCESSOR: Once<SpinNoIrqLock<Processor>> = Once::new();
pub static MC: Once<SpinNoIrqLock<MemoryController>> = Once::new();
extern fn idle_thread(arg: usize) -> ! {
loop {
@@ -40,16 +38,14 @@ extern fn idle_thread(arg: usize) -> ! {
pub fn add_user_process(name: impl AsRef<str>, data: &[u8]) {
let mut processor = PROCESSOR.try().unwrap().lock();
let mut mc = MC.try().unwrap().lock();
let mut new = Process::new_user(data, &mut mc);
let mut new = Process::new_user(data);
new.name = String::from(name.as_ref());
processor.add(new);
}
pub fn add_kernel_process(entry: extern fn(usize) -> !, arg: usize) -> Pid {
let mut processor = PROCESSOR.try().unwrap().lock();
let mut mc = MC.try().unwrap().lock();
let mut new = Process::new("", entry, arg, &mut mc);
let mut new = Process::new("", entry, arg);
processor.add(new)
}
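With `MemorySet` owning both the page table and the kernel stack, process creation no longer threads a `MemoryController` (or the removed `MC` lock) through every call site. Spawning a kernel thread under the new API, matching the signatures in this hunk:

```rust
// New-API sketch: the entry fn shape matches add_kernel_process above.
extern fn worker(_arg: usize) -> ! {
    loop { /* kernel thread body */ }
}

fn spawn_worker() -> Pid {
    add_kernel_process(worker, 42) // Process::new builds its own MemorySet
}
```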

@@ -1,5 +1,5 @@
use super::*;
use memory::{self, Stack, InactivePageTable};
use memory::{self, Stack, InactivePageTable, MemoryAttr};
use xmas_elf::{ElfFile, program::{Flags, ProgramHeader}, header::HeaderPt2};
use core::slice;
use alloc::{rc::Rc, String};
@@ -10,9 +10,7 @@ pub struct Process {
pub(in process) pid: Pid,
pub(in process) parent: Pid,
pub(in process) name: String,
kstack: Stack,
pub(in process) memory_set: Option<MemorySet>,
pub(in process) page_table: Option<InactivePageTable>,
pub(in process) memory_set: MemorySet,
pub(in process) status: Status,
pub(in process) context: Context,
pub(in process) is_user: bool,
@@ -32,18 +30,16 @@ pub enum Status {
impl Process {
/// Make a new kernel thread
pub fn new(name: &str, entry: extern fn(usize) -> !, arg: usize, mc: &mut MemoryController) -> Self {
let kstack = mc.alloc_stack(7).unwrap();
let data = InitStack::new_kernel_thread(entry, arg, kstack.top());
let context = unsafe { data.push_at(kstack.top()) };
pub fn new(name: &str, entry: extern fn(usize) -> !, arg: usize) -> Self {
let ms = MemorySet::new(7);
let data = InitStack::new_kernel_thread(entry, arg, ms.kstack_top());
let context = unsafe { data.push_at(ms.kstack_top()) };
Process {
pid: 0,
parent: 0,
name: String::from(name),
kstack,
memory_set: None,
page_table: None,
memory_set: ms,
status: Status::Ready,
context,
is_user: false,
@@ -52,15 +48,13 @@ impl Process {
/// Make the first kernel thread `initproc`
/// Should be called only once
pub fn new_init(mc: &mut MemoryController) -> Self {
pub fn new_init(ms: MemorySet) -> Self {
assert_has_not_been_called!();
Process {
pid: 0,
parent: 0,
name: String::from("init"),
kstack: mc.kernel_stack.take().unwrap(),
memory_set: None,
page_table: None,
memory_set: ms,
status: Status::Running,
context: unsafe { Context::null() }, // will be set at first schedule
is_user: false,
@@ -70,7 +64,7 @@ impl Process {
/// Make a new user thread
/// The program elf data is placed at [begin, end)
/// uCore x86 32bit program is planned to be supported.
pub fn new_user(data: &[u8], mc: &mut MemoryController) -> Self {
pub fn new_user(data: &[u8]) -> Self {
// Parse elf
let begin = data.as_ptr() as usize;
let elf = ElfFile::new(data).expect("failed to read elf");
@@ -88,9 +82,7 @@ impl Process {
// Make page table
let mut memory_set = MemorySet::from(&elf);
memory_set.push(MemoryArea::new(user_stack_buttom, user_stack_top,
EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE | EntryFlags::USER_ACCESSIBLE, "user_stack"));
let page_table = mc.make_page_table(&memory_set);
memory_set.push(MemoryArea::new(user_stack_buttom, user_stack_top, MemoryAttr::default().user(), "user_stack"));
trace!("{:#x?}", memory_set);
let entry_addr = match elf.header.pt2 {
@@ -99,7 +91,7 @@
};
// Temporary switch to it, in order to copy data
let page_table = mc.with(page_table, || {
memory_set.with(|| {
for ph in elf.program_iter() {
let (virt_addr, offset, file_size) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
@@ -119,17 +111,14 @@
// Allocate kernel stack and push trap frame
let kstack = mc.alloc_stack(7).unwrap();
let data = InitStack::new_user_thread(entry_addr, user_stack_top - 8, is32);
let context = unsafe { data.push_at(kstack.top()) };
let context = unsafe { data.push_at(memory_set.kstack_top()) };
Process {
pid: 0,
parent: 0,
name: String::new(),
kstack,
memory_set: Some(memory_set),
page_table: Some(page_table),
memory_set,
status: Status::Ready,
context,
is_user: true,
@@ -137,12 +126,11 @@ }
}
/// Fork
pub fn fork(&self, tf: &TrapFrame, mc: &mut MemoryController) -> Self {
pub fn fork(&self, tf: &TrapFrame) -> Self {
assert!(self.is_user);
// Clone memory set, make a new page table
let memory_set = self.memory_set.as_ref().unwrap().clone();
let page_table = mc.make_page_table(&memory_set);
let memory_set = self.memory_set.clone(7);
// Copy data to temp space
use alloc::Vec;
@@ -151,24 +139,21 @@ }).collect();
}).collect();
// Temporary switch to it, in order to copy data
let page_table = mc.with(page_table, || {
memory_set.with(|| {
for (area, data) in memory_set.iter().zip(datas.iter()) {
unsafe { area.as_slice_mut() }.copy_from_slice(data.as_slice())
}
});
// Allocate kernel stack and push trap frame
let kstack = mc.alloc_stack(7).unwrap();
let data = InitStack::new_fork(tf);
let context = unsafe { data.push_at(kstack.top()) };
let context = unsafe { data.push_at(memory_set.kstack_top()) };
Process {
pid: 0,
parent: self.pid,
name: self.name.clone() + "_fork",
kstack,
memory_set: Some(memory_set),
page_table: Some(page_table),
memory_set,
status: Status::Ready,
context,
is_user: true,
@@ -187,29 +172,23 @@ use memory::{MemorySet, MemoryArea, PhysAddr, FromToVirtualAddress, EntryFlags};
impl<'a> From<&'a ElfFile<'a>> for MemorySet {
fn from(elf: &'a ElfFile<'a>) -> Self {
let mut set = MemorySet::new();
let mut set = MemorySet::new(7);
for ph in elf.program_iter() {
let (virt_addr, mem_size, flags) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
};
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, EntryFlags::from(flags), ""));
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, MemoryAttr::from(flags), ""));
}
set
}
}
impl From<Flags> for EntryFlags {
impl From<Flags> for MemoryAttr {
fn from(elf_flags: Flags) -> Self {
let mut flags = EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE;
// if elf_flags.is_write() {
let mut flags = MemoryAttr::default().user();
// TODO: handle readonly
if true {
flags = flags | EntryFlags::WRITABLE;
}
if !elf_flags.is_execute() {
flags = flags | EntryFlags::NO_EXECUTE;
}
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}
}

@@ -11,9 +11,6 @@ pub struct Processor {
procs: BTreeMap<Pid, Process>,
current_pid: Pid,
event_hub: EventHub<Event>,
/// All kernel threads share one page table.
/// When running user process, it will be stored here.
kernel_page_table: Option<InactivePageTable>,
/// Choose what on next schedule ?
next: Option<Pid>,
// WARNING: if MAX_PROCESS_NUM is too large, will cause stack overflow
@@ -27,7 +24,6 @@ impl Processor {
procs: BTreeMap::<Pid, Process>::new(),
current_pid: 0,
event_hub: EventHub::new(),
kernel_page_table: None,
next: None,
// NOTE: max_time_slice <= 5 to ensure 'priority' test pass
scheduler: RRScheduler::new(5),
@@ -139,19 +135,7 @@ impl Processor {
self.scheduler.remove(pid);
// switch page table
if from.is_user || to.is_user {
let (from_pt, to_pt) = match (from.is_user, to.is_user) {
(true, true) => (&mut from.page_table, &mut to.page_table),
(true, false) => (&mut from.page_table, &mut self.kernel_page_table),
(false, true) => (&mut self.kernel_page_table, &mut to.page_table),
_ => panic!(),
};
assert!(from_pt.is_none());
assert!(to_pt.is_some());
let mut active_table = unsafe { ActivePageTable::new() };
let old_table = active_table.switch(to_pt.take().unwrap());
*from_pt = Some(old_table);
}
to.memory_set.switch();
info!("switch from {} to {}\n rsp: ??? -> {:?}", pid0, pid, to.context);
unsafe {

@@ -86,8 +86,7 @@ fn sys_close(fd: usize) -> i32 {
/// Fork the current process. Return the child's PID.
fn sys_fork(tf: &TrapFrame) -> i32 {
let mut processor = PROCESSOR.try().unwrap().lock();
let mut mc = MC.try().unwrap().lock();
let new = processor.current().fork(tf, &mut mc);
let new = processor.current().fork(tf);
let pid = processor.add(new);
info!("fork: {} -> {}", processor.current_pid(), pid);
pid as i32
