Fix compile

Branch: master
Author: WangRunji, 6 years ago
commit d3ed84ba61 (parent aecb85d5e7)

@@ -1,6 +1,6 @@
#![no_std]
#![feature(alloc)]
#![feature(universal_impl_trait)]
#![feature(universal_impl_trait, conservative_impl_trait)]
#![feature(match_default_bindings)]
extern crate alloc;

@@ -1,6 +1,5 @@
use alloc::vec::Vec;
use core::fmt::{Debug, Error, Formatter};
use core::marker::PhantomData;
use super::*;
use paging::*;
@@ -8,11 +7,14 @@ pub trait InactivePageTable {
type Active: PageTable;
fn new() -> Self;
fn new_bare() -> Self;
fn edit(&mut self, f: impl FnOnce(&mut Self::Active));
unsafe fn activate(&self) -> Self;
unsafe fn activate(&self);
unsafe fn with(&self, f: impl FnOnce());
fn alloc_frame() -> Option<PhysAddr>;
fn dealloc_frame(target: PhysAddr);
fn alloc_stack(size_in_pages: usize) -> Stack;
}
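
For reference, a minimal sketch (not from the commit) of how a caller might drive the reworked trait: for the x86_64 implementor below, new() adds the kernel mappings on top of new_bare(), activate() no longer returns the old table, and temporary switches go through with(). The sketch assumes the PageTable trait is in scope and that its map(addr, target) signature matches the ActivePageTable impl shown further down; all other names are made up:

    fn make_user_table<T: InactivePageTable>() -> T {
        let mut pt = T::new();                                // new_bare() plus kernel mappings
        let frame = T::alloc_frame().expect("out of frames"); // PhysAddr is a usize alias here
        pt.edit(|active| {
            // `active` is this table made temporarily editable as T::Active
            active.map(0x1000_0000, frame);
        });
        unsafe {
            pt.with(|| {
                // runs with `pt` switched in; the previous table is restored afterwards
            });
        }
        pt
    }
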
/// A contiguous region of memory sharing the same access permissions
@@ -35,9 +37,11 @@ impl MemoryArea {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: Some(start_addr), flags, name }
}
pub fn new_offset(start_addr: VirtAddr, end_addr: VirtAddr, offset: isize, flags: MemoryAttr, name: &'static str) -> Self {
pub fn new_physical(phys_start_addr: PhysAddr, phys_end_addr: PhysAddr, offset: usize, flags: MemoryAttr, name: &'static str) -> Self {
let start_addr = phys_start_addr + offset;
let end_addr = phys_end_addr + offset;
assert!(start_addr <= end_addr, "invalid memory area");
let phys_start_addr = Some((start_addr as isize + offset) as usize);
let phys_start_addr = Some(phys_start_addr);
MemoryArea { start_addr, end_addr, phys_start_addr, flags, name }
}
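
For reference, a small illustration (not from the commit) of the new constructor: new_physical takes a physical range plus an offset and derives the virtual range as physical + offset, storing phys_start_addr directly. This is how the VGA and LAPIC areas are rebuilt later in the commit; the offset constant below is made up, not the kernel's real KERNEL_OFFSET:

    fn vga_area_demo() -> MemoryArea {
        // Illustrative constants only.
        let vga = MemoryArea::new_physical(0xb8000, 0xb9000, 0xffff_8000_0000_0000,
                                           MemoryAttr::default(), "VGA");
        // start_addr = 0xffff_8000_000b_8000, end_addr = 0xffff_8000_000b_9000,
        // phys_start_addr = Some(0xb8000): virtual address = physical address + offset.
        vga
    }
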
pub unsafe fn as_slice(&self) -> &[u8] {
@@ -127,6 +131,7 @@ impl MemoryAttr {
pub struct MemorySet<T: InactivePageTable> {
areas: Vec<MemoryArea>,
page_table: T,
kstack: Stack,
}
impl<T: InactivePageTable> MemorySet<T> {
@@ -134,15 +139,17 @@ impl<T: InactivePageTable> MemorySet<T> {
MemorySet {
areas: Vec::<MemoryArea>::new(),
page_table: T::new(),
kstack: T::alloc_stack(7),
}
}
/// Used for remap_kernel() where heap alloc is unavailable
pub unsafe fn new_from_raw_space(slice: &mut [u8]) -> Self {
pub unsafe fn new_from_raw_space(slice: &mut [u8], kstack: Stack) -> Self {
use core::mem::size_of;
let cap = slice.len() / size_of::<MemoryArea>();
MemorySet {
areas: Vec::<MemoryArea>::from_raw_parts(slice.as_ptr() as *mut MemoryArea, 0, cap),
page_table: T::new(),
page_table: T::new_bare(),
kstack,
}
}
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
@@ -159,13 +166,14 @@ impl<T: InactivePageTable> MemorySet<T> {
self.areas.iter()
}
pub unsafe fn with(&self, f: impl FnOnce()) {
let old = self.page_table.activate();
f();
old.activate();
self.page_table.with(f);
}
pub unsafe fn activate(&self) {
self.page_table.activate();
}
pub fn kstack_top(&self) -> usize {
self.kstack.top
}
pub fn clone(&self) -> Self {
let mut page_table = T::new();
page_table.edit(|pt| {
@@ -176,6 +184,7 @@ impl<T: InactivePageTable> MemorySet<T> {
MemorySet {
areas: self.areas.clone(),
page_table,
kstack: T::alloc_stack(7),
}
}
pub fn clear(&mut self) {
@@ -188,3 +197,23 @@ impl<T: InactivePageTable> MemorySet<T> {
areas.clear();
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list()
.entries(self.areas.iter())
.finish()
}
}
#[derive(Debug)]
pub struct Stack {
pub top: usize,
pub bottom: usize,
}

@@ -1,15 +1,24 @@
use memory::*;
pub use ucore_memory::cow::CowExt;
pub use ucore_memory::paging::{Entry, PageTable};
use ucore_memory::PAGE_SIZE;
use ucore_memory::cow::CowExt;
use ucore_memory::paging::*;
use ucore_memory::memory_set::*;
use x86_64::instructions::tlb;
use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{Mapper, PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
pub use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use x86_64::ux::u9;
use x86_64::PhysAddr;
use bit_allocator::{BitAlloc, BitAlloc64K};
use spin::{Mutex, MutexGuard};
// Depends on kernel
use memory::{active_table, alloc_frame, dealloc_frame, alloc_stack};
pub trait PageExt {
fn of_addr(address: VirtAddr) -> Self;
fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange;
fn of_addr(address: usize) -> Self;
fn range_of(begin: usize, end: usize) -> PageRange;
}
impl PageExt for Page {
@@ -17,7 +26,7 @@ impl PageExt for Page {
use x86_64;
Page::containing_address(x86_64::VirtAddr::new(address as u64))
}
fn range_of(begin: usize, end: usize) -> PageRange<Size4KiB> {
fn range_of(begin: usize, end: usize) -> PageRange {
Page::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
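
For reference, a small illustration (not from the commit) of the range_of semantics now that it takes plain usize bounds: the range is half-open and rounds outward to 4 KiB pages, because begin is rounded down to its page while end - 1 plus one page rounds the exclusive bound up. Assumes the PageExt trait is in scope:

    fn page_range_demo() {
        assert_eq!(Page::range_of(0x1000, 0x3001).count(), 3); // pages 0x1000, 0x2000, 0x3000
        assert_eq!(Page::range_of(0x1000, 0x3000).count(), 2); // an exact bound stops after 0x2000
    }
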
@@ -41,7 +50,7 @@ impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut frame_allocator())
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
.unwrap().flush();
self.get_entry(addr)
}
@@ -58,7 +67,7 @@ impl PageTable for ActivePageTable {
unsafe { &mut *(entry_addr as *mut PageEntry) }
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
@@ -76,24 +85,17 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(table.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
});
}
pub fn map_to(&mut self, page: Page, frame: Frame) -> &mut PageEntry {
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize)
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
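
Note: with_temporary_map turns the old free function into a method on ActivePageTable. The frame handed in is typically a page-table frame that is not mapped at any virtual address yet, so the method parks it at the scratch page containing 0xcafebabe, hands it to f as a raw x86PageTable, and unmaps it again; in the hunks below it is called from new_bare() and edit().
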
@@ -150,58 +152,112 @@ impl PageEntry {
}
#[derive(Debug)]
pub struct InactivePageTable {
pub struct InactivePageTable0 {
p4_frame: Frame,
}
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
with_temporary_map(active_table, &frame, |_, table: &mut x86PageTable| {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
});
InactivePageTable { p4_frame: frame }
InactivePageTable0 { p4_frame: frame }
}
pub fn map_kernel(&mut self, active_table: &mut ActivePageTable) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
let e510 = table[510].clone();
let e509 = table[509].clone();
active_table.with(self, |pt: &mut ActivePageTable| {
table[510] = e510;
table[509] = e509;
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
});
}
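
Note: edit() reuses the recursive-mapping trick. It temporarily maps the frame currently in Cr3 at the scratch page, rewrites that table's entry 0o777 (index 511, the recursive slot) to point at self.p4_frame, and flushes the TLB, so every address resolved through the recursive mapping, including 0xffffffff_fffff000, now lands in the inactive table and f can edit it through the ordinary ActivePageTable interface; afterwards the backup entry is restored and the TLB flushed again. map_kernel() further down relies on exactly this: it reads entries 510 and 509 through the recursive address while the original table is still visible, then writes them back inside edit(), where the same pointer refers to the new table.
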
pub fn switch(&self) {
unsafe fn activate(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
}
unsafe fn with(&self, f: impl FnOnce()) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
unsafe { Cr3::write(new_frame, Cr3Flags::empty()); }
Cr3::write(new_frame, Cr3Flags::empty());
}
f();
debug!("switch table {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
Cr3::write(old_frame, Cr3Flags::empty());
}
}
pub unsafe fn from_cr3() -> Self {
InactivePageTable { p4_frame: Cr3::read().0 }
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
fn alloc_stack(size_in_pages: usize) -> Stack {
alloc_stack(size_in_pages)
}
}
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
let e510 = table[510].clone();
let e509 = table[509].clone();
self.edit(|_| {
table[510] = e510;
table[509] = e509;
});
}
}
impl Drop for InactivePageTable {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
dealloc_frame(self.p4_frame.clone());
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}
struct FrameAllocatorForX86;
impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr))
}
}
fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(active_table.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
active_table.map_to(page, frame.clone());
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(active_table, table);
// Unmap the page
active_table.unmap(0xcafebabe);
impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u64() as usize);
}
}
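
Note: FrameAllocatorForX86 is a zero-sized adapter that forwards the x86_64 crate's FrameAllocator and FrameDeallocator traits to the kernel's alloc_frame()/dealloc_frame(). It replaces the BitAllocGuard removed from memory/mod.rs below, and it is what map() now passes to map_to() so the Mapper can allocate intermediate page-table frames on demand.
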

@@ -9,12 +9,12 @@ const ENTRYOTHER_ADDR: u32 = 0x7000;
pub fn start_other_cores(acpi: &AcpiResult, ms: &mut MemorySet) {
use consts::KERNEL_OFFSET;
ms.push(MemoryArea::new_identity(ENTRYOTHER_ADDR as usize, ENTRYOTHER_ADDR as usize + 1, MemoryAttr::default().execute(), "entry_other.text"));
ms.push(MemoryArea::new_kernel(KERNEL_OFFSET, KERNEL_OFFSET + 1, MemoryAttr::default(), "entry_other.ctrl"));
ms.push(MemoryArea::new_physical(0, 4096, KERNEL_OFFSET, MemoryAttr::default(), "entry_other.ctrl"));
let args = unsafe { &mut *(0x8000 as *mut EntryArgs).offset(-1) };
for i in 1 .. acpi.cpu_num {
let apic_id = acpi.cpu_acpi_ids[i as usize];
let ms = MemorySet::new(7);
let ms = MemorySet::new();
*args = EntryArgs {
kstack: ms.kstack_top() as u64,
page_table: Cr3::read().0.start_address().as_u64() as u32,

@@ -145,7 +145,7 @@ pub extern "C" fn other_main() -> ! {
arch::driver::apic::other_init();
let cpu_id = arch::driver::apic::lapic_id();
let ms = unsafe { arch::smp::notify_started(cpu_id) };
ms.switch();
unsafe { ms.activate(); }
println!("Hello world! from CPU {}!", arch::driver::apic::lapic_id());
// unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
loop {}

@@ -1,28 +0,0 @@
use consts::{KERNEL_OFFSET, KERNEL_SIZE};
pub use x86_64::PhysAddr;
pub type VirtAddr = usize;
pub trait FromToVirtualAddress {
fn get(&self) -> usize;
fn to_identity_virtual(&self) -> VirtAddr;
fn to_kernel_virtual(&self) -> VirtAddr;
fn from_kernel_virtual(addr: VirtAddr) -> Self;
}
impl FromToVirtualAddress for PhysAddr {
fn get(&self) -> usize {
self.as_u64() as usize
}
fn to_identity_virtual(&self) -> VirtAddr {
self.get()
}
fn to_kernel_virtual(&self) -> VirtAddr {
assert!(self.get() < KERNEL_SIZE);
self.get() + KERNEL_OFFSET
}
fn from_kernel_virtual(addr: VirtAddr) -> Self {
assert!(addr >= KERNEL_OFFSET && addr < KERNEL_OFFSET + KERNEL_SIZE);
PhysAddr::new((addr - KERNEL_OFFSET) as u64)
}
}

@@ -3,40 +3,33 @@ use bit_allocator::{BitAlloc, BitAlloc64K};
use consts::KERNEL_OFFSET;
use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
use multiboot2::BootInformation;
pub use self::address::*;
pub use self::memory_set::*;
pub use self::stack_allocator::*;
use spin::{Mutex, MutexGuard};
use super::HEAP_ALLOCATOR;
use ucore_memory::paging::PageTable;
use ucore_memory::{*, paging::PageTable, cow::CowExt};
pub use ucore_memory::memory_set::{MemoryAttr, MemoryArea, MemorySet as MemorySet_, Stack};
mod memory_set;
mod stack_allocator;
mod address;
pub type MemorySet = MemorySet_<InactivePageTable0>;
pub const PAGE_SIZE: usize = 1 << 12;
mod stack_allocator;
lazy_static! {
static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
}
static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
pub fn alloc_frame() -> Frame {
let frame = frame_allocator().alloc().expect("no more frame");
trace!("alloc: {:?}", frame);
frame
pub fn alloc_frame() -> Option<usize> {
FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE)
}
pub fn dealloc_frame(frame: Frame) {
trace!("dealloc: {:?}", frame);
frame_allocator().dealloc(frame);
pub fn dealloc_frame(target: usize) {
FRAME_ALLOCATOR.lock().dealloc(target / PAGE_SIZE);
}
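
The bit allocator now hands out frame indices, with the physical address derived as index * PAGE_SIZE on allocation and recovered as address / PAGE_SIZE on free. A tiny illustration (not from the commit, values hypothetical):

    fn frame_round_trip() {
        let paddr = alloc_frame().expect("out of physical frames"); // e.g. bit 5 -> 0x5000
        assert_eq!(paddr % PAGE_SIZE, 0);                           // frames are page-aligned
        dealloc_frame(paddr);                                       // 0x5000 -> bit 5 again
    }
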
fn alloc_stack(size_in_pages: usize) -> Stack {
let mut active_table = active_table();
pub fn alloc_stack(size_in_pages: usize) -> Stack {
STACK_ALLOCATOR.lock()
.as_mut().expect("stack allocator is not initialized")
.alloc_stack(&mut active_table, size_in_pages).expect("no more stack")
.alloc_stack(size_in_pages).expect("no more stack")
}
lazy_static! {
@@ -46,19 +39,15 @@ lazy_static! {
}
/// The only way to get active page table
fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
ACTIVE_TABLE.lock()
}
pub fn frame_allocator() -> BitAllocGuard {
BitAllocGuard(FRAME_ALLOCATOR.lock())
}
// Return true to continue, false to halt
pub fn page_fault_handler(addr: VirtAddr) -> bool {
pub fn page_fault_handler(addr: usize) -> bool {
// Handle copy on write
unsafe { ACTIVE_TABLE.force_unlock(); }
active_table().page_fault_handler(addr, || alloc_frame().start_address().as_u64() as usize)
active_table().page_fault_handler(addr, || alloc_frame().unwrap())
}
pub fn init(boot_info: BootInformation) -> MemorySet {
@@ -75,6 +64,7 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
*STACK_ALLOCATOR.lock() = Some({
use ucore_memory::Page;
let stack_alloc_range = Page::range_of(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE,
KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE + 0x1000000);
stack_allocator::StackAllocator::new(stack_alloc_range)
@@ -83,20 +73,6 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
kernel_memory
}
pub struct BitAllocGuard(MutexGuard<'static, BitAlloc64K>);
impl FrameAllocator<Size4KiB> for BitAllocGuard {
fn alloc(&mut self) -> Option<Frame> {
self.0.alloc().map(|x| Frame::of_addr(x * PAGE_SIZE))
}
}
impl FrameDeallocator<Size4KiB> for BitAllocGuard {
fn dealloc(&mut self, frame: Frame) {
self.0.dealloc(frame.start_address().as_u64() as usize / PAGE_SIZE);
}
}
fn init_frame_allocator(boot_info: &BootInformation) {
let memory_areas = boot_info.memory_map_tag().expect("Memory map tag required")
.memory_areas();
@@ -132,72 +108,61 @@ fn init_frame_allocator(boot_info: &BootInformation) {
}
fn remap_the_kernel(boot_info: BootInformation) -> MemorySet {
let mut memory_set = MemorySet::from(boot_info.elf_sections_tag().unwrap());
extern { fn stack_bottom(); }
let stack_bottom = stack_bottom as usize + KERNEL_OFFSET;
let kstack = Stack {
top: stack_bottom + 8 * PAGE_SIZE,
bottom: stack_bottom + 1 * PAGE_SIZE,
};
let mut memory_set = memory_set_from(boot_info.elf_sections_tag().unwrap(), kstack);
use consts::{KERNEL_OFFSET, KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xb8000, KERNEL_OFFSET + 0xb9000, MemoryAttr::default(), "VGA"));
memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xfee00000, KERNEL_OFFSET + 0xfee01000, MemoryAttr::default(), "LAPIC"));
memory_set.push(MemoryArea::new_physical(0xb8000, 0xb9000, KERNEL_OFFSET, MemoryAttr::default(), "VGA"));
memory_set.push(MemoryArea::new_physical(0xfee00000, 0xfee01000, KERNEL_OFFSET, MemoryAttr::default(), "LAPIC"));
memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
debug!("{:#x?}", memory_set);
memory_set.switch();
unsafe { memory_set.activate(); }
info!("NEW TABLE!!!");
let kstack = get_init_kstack_and_set_guard_page();
memory_set.set_kstack(kstack);
memory_set
}
fn get_init_kstack_and_set_guard_page() -> Stack {
assert_has_not_been_called!();
extern { fn stack_bottom(); }
let stack_bottom = PhysAddr::new(stack_bottom as u64).to_kernel_virtual();
// turn the stack bottom into a guard page
active_table().unmap(stack_bottom);
debug!("guard page at {:?}", stack_bottom);
Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
memory_set
}
impl From<ElfSectionsTag> for MemorySet {
fn from(sections: ElfSectionsTag) -> Self {
assert_has_not_been_called!();
// WARNING: must ensure it's large enough
static mut SPACE: [u8; 0x1000] = [0; 0x1000];
let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE) };
for section in sections.sections().filter(|s| s.is_allocated()) {
set.push(MemoryArea::from(section));
}
set
fn memory_set_from(sections: ElfSectionsTag, kstack: Stack) -> MemorySet {
assert_has_not_been_called!();
// WARNING: must ensure it's large enough
static mut SPACE: [u8; 0x1000] = [0; 0x1000];
let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
for section in sections.sections().filter(|s| s.is_allocated()) {
set.push(memory_area_from(section));
}
set
}
impl From<ElfSection> for MemoryArea {
fn from(section: ElfSection) -> Self {
let mut start_addr = section.start_address() as usize;
let mut end_addr = section.end_address() as usize;
assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
let name = unsafe { &*(section.name() as *const str) };
if start_addr < KERNEL_OFFSET {
start_addr += KERNEL_OFFSET;
end_addr += KERNEL_OFFSET;
}
MemoryArea::new_kernel(start_addr, end_addr, MemoryAttr::from(section.flags()), name)
fn memory_area_from(section: ElfSection) -> MemoryArea {
let mut start_addr = section.start_address() as usize;
let mut end_addr = section.end_address() as usize;
assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
let name = unsafe { &*(section.name() as *const str) };
if start_addr >= KERNEL_OFFSET {
start_addr -= KERNEL_OFFSET;
end_addr -= KERNEL_OFFSET;
}
MemoryArea::new_physical(start_addr, end_addr, KERNEL_OFFSET, memory_attr_from(section.flags()), name)
}
impl From<ElfSectionFlags> for MemoryAttr {
fn from(elf_flags: ElfSectionFlags) -> Self {
let mut flags = MemoryAttr::default();
fn memory_attr_from(elf_flags: ElfSectionFlags) -> MemoryAttr {
let mut flags = MemoryAttr::default();
if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
flags
}
if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
flags
}
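
Note: the three From impls in this file become the free functions memory_set_from, memory_area_from and memory_attr_from because MemorySet, MemoryArea and MemoryAttr are now defined in the ucore_memory crate; the orphan rule stops this crate from implementing the (equally foreign) From trait for them, so plain conversion functions are the minimal fix. The same change is mirrored in the process module further down.
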
pub mod test {

@@ -1,5 +1,7 @@
use memory::PAGE_SIZE;
use super::*;
use ucore_memory::*;
use ucore_memory::paging::PageTable;
use ucore_memory::memory_set::Stack;
use memory::{alloc_frame, active_table};
// TODO: use BitAllocator & alloc fixed size stack
pub struct StackAllocator {
@@ -13,8 +15,7 @@ impl StackAllocator {
}
impl StackAllocator {
pub fn alloc_stack(&mut self, active_table: &mut ActivePageTable,
size_in_pages: usize) -> Option<Stack> {
pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option<Stack> {
if size_in_pages == 0 {
return None; /* a zero sized stack makes no sense */
}
@@ -39,46 +40,19 @@ impl StackAllocator {
self.range = range;
// map stack pages to physical frames
for page in Page::range_inclusive(start, end) {
let frame = alloc_frame();
active_table.map_to(page, frame);
for page in Page::range_of(start.start_address(), end.start_address() + PAGE_SIZE) {
let frame = alloc_frame().unwrap();
active_table().map(page.start_address(), frame);
}
// create a new stack
let top_of_stack = end.start_address() + PAGE_SIZE;
Some(Stack::new(top_of_stack.as_u64() as usize, start.start_address().as_u64() as usize))
Some(Stack {
top: top_of_stack,
bottom: start.start_address(),
})
}
_ => None, /* not enough pages */
}
}
}
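
Note: alloc_stack now maps the stack pages itself through the global active_table() instead of taking an ActivePageTable argument. Every page from start through end inclusive (hence the end.start_address() + PAGE_SIZE upper bound given to range_of) is backed by a freshly allocated frame, and the returned Stack takes the byte just past the last page as top and the first page's start as bottom.
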
#[derive(Debug)]
pub struct Stack {
top: usize,
bottom: usize,
}
impl Stack {
pub(super) fn new(top: usize, bottom: usize) -> Stack {
assert!(top > bottom);
Stack {
top,
bottom,
}
}
pub fn top(&self) -> usize {
self.top
}
pub fn bottom(&self) -> usize {
self.bottom
}
}
impl Drop for Stack {
fn drop(&mut self) {
warn!("stack leak: {:x?}", self);
}
}

@@ -29,7 +29,7 @@ pub enum Status {
impl Process {
/// Make a new kernel thread
pub fn new(name: &str, entry: extern fn(usize) -> !, arg: usize) -> Self {
let ms = MemorySet::new(7);
let ms = MemorySet::new();
let data = InitStack::new_kernel_thread(entry, arg, ms.kstack_top());
let context = unsafe { data.push_at(ms.kstack_top()) };
@@ -78,7 +78,7 @@ impl Process {
};
// Make page table
let mut memory_set = MemorySet::from(&elf);
let mut memory_set = memory_set_from(&elf);
memory_set.push(MemoryArea::new(user_stack_buttom, user_stack_top, MemoryAttr::default().user(), "user_stack"));
trace!("{:#x?}", memory_set);
@@ -88,24 +88,26 @@ impl Process {
};
// Temporary switch to it, in order to copy data
memory_set.with(|| {
for ph in elf.program_iter() {
let (virt_addr, offset, file_size) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
};
use core::slice;
let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
target.copy_from_slice(&data[offset..offset + file_size]);
}
if is32 {
unsafe {
// TODO: full argc & argv
*(user_stack_top as *mut u32).offset(-1) = 0; // argv
*(user_stack_top as *mut u32).offset(-2) = 0; // argc
unsafe {
memory_set.with(|| {
for ph in elf.program_iter() {
let (virt_addr, offset, file_size) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
};
use core::slice;
let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
target.copy_from_slice(&data[offset..offset + file_size]);
}
}
});
if is32 {
unsafe {
// TODO: full argc & argv
*(user_stack_top as *mut u32).offset(-1) = 0; // argv
*(user_stack_top as *mut u32).offset(-2) = 0; // argc
}
}
});
}
// Allocate kernel stack and push trap frame
@@ -128,7 +130,7 @@ impl Process {
assert!(self.is_user);
// Clone memory set, make a new page table
let memory_set = self.memory_set.clone(7);
let memory_set = self.memory_set.clone();
// Copy data to temp space
use alloc::Vec;
@@ -137,11 +139,13 @@ impl Process {
}).collect();
// Temporary switch to it, in order to copy data
memory_set.with(|| {
for (area, data) in memory_set.iter().zip(datas.iter()) {
unsafe { area.as_slice_mut() }.copy_from_slice(data.as_slice())
}
});
unsafe {
memory_set.with(|| {
for (area, data) in memory_set.iter().zip(datas.iter()) {
unsafe { area.as_slice_mut() }.copy_from_slice(data.as_slice())
}
});
}
// Allocate kernel stack and push trap frame
let data = InitStack::new_fork(tf);
@@ -166,25 +170,21 @@ impl Process {
}
}
impl<'a> From<&'a ElfFile<'a>> for MemorySet {
fn from(elf: &'a ElfFile<'a>) -> Self {
let mut set = MemorySet::new(7);
for ph in elf.program_iter() {
let (virt_addr, mem_size, flags) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
};
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, MemoryAttr::from(flags), ""));
}
set
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
let mut set = MemorySet::new();
for ph in elf.program_iter() {
let (virt_addr, mem_size, flags) = match ph {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
};
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
}
set
}
impl From<Flags> for MemoryAttr {
fn from(elf_flags: Flags) -> Self {
let mut flags = MemoryAttr::default().user();
// TODO: handle readonly
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}
fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
let mut flags = MemoryAttr::default().user();
// TODO: handle readonly
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}

@@ -131,11 +131,10 @@ impl Processor {
to.status = Status::Running;
self.scheduler.remove(pid);
// switch page table
to.memory_set.switch();
info!("switch from {} to {}\n rsp: ??? -> {:?}", pid0, pid, to.context);
unsafe {
// switch page table
to.memory_set.activate();
// FIXME: safely pass MutexGuard
use core::mem::forget;
super::PROCESSOR.try().unwrap().force_unlock();
