@@ -3,40 +3,33 @@ use bit_allocator::{BitAlloc, BitAlloc64K};
 use consts::KERNEL_OFFSET;
 use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
 use multiboot2::BootInformation;
-pub use self::address::*;
-pub use self::memory_set::*;
 pub use self::stack_allocator::*;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
-use ucore_memory::paging::PageTable;
+use ucore_memory::{*, paging::PageTable, cow::CowExt};
+pub use ucore_memory::memory_set::{MemoryAttr, MemoryArea, MemorySet as MemorySet_, Stack};
+
+pub type MemorySet = MemorySet_<InactivePageTable0>;
+
+pub const PAGE_SIZE: usize = 1 << 12;
 
-mod memory_set;
 mod stack_allocator;
-mod address;
 
 lazy_static! {
     static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
 }
 
 static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
 
-pub fn alloc_frame() -> Frame {
-    let frame = frame_allocator().alloc().expect("no more frame");
-    trace!("alloc: {:?}", frame);
-    frame
+pub fn alloc_frame() -> Option<usize> {
+    FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE)
 }
 
-pub fn dealloc_frame(frame: Frame) {
-    trace!("dealloc: {:?}", frame);
-    frame_allocator().dealloc(frame);
+pub fn dealloc_frame(target: usize) {
+    FRAME_ALLOCATOR.lock().dealloc(target / PAGE_SIZE);
 }
 
-fn alloc_stack(size_in_pages: usize) -> Stack {
-    let mut active_table = active_table();
+pub fn alloc_stack(size_in_pages: usize) -> Stack {
     STACK_ALLOCATOR.lock()
         .as_mut().expect("stack allocator is not initialized")
-        .alloc_stack(&mut active_table, size_in_pages).expect("no more stack")
+        .alloc_stack(size_in_pages).expect("no more stack")
 }
 
 lazy_static! {
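Not part of the patch: a stand-alone sketch of the id-to-address arithmetic the new `alloc_frame`/`dealloc_frame` rely on, with a toy bitmap in place of `BitAlloc64K` (all names below are illustrative):

```rust
const PAGE_SIZE: usize = 1 << 12;

// Toy stand-in for the BitAlloc64K bitmap: one flag per frame id.
struct ToyBitAlloc { used: Vec<bool> }

impl ToyBitAlloc {
    fn alloc(&mut self) -> Option<usize> {
        // find the first free id and mark it used
        let id = self.used.iter().position(|&u| !u)?;
        self.used[id] = true;
        Some(id)
    }
    fn dealloc(&mut self, id: usize) {
        assert!(self.used[id], "freeing a frame that was never allocated");
        self.used[id] = false;
    }
}

fn main() {
    let mut allocator = ToyBitAlloc { used: vec![false; 16] };
    // alloc_frame in the patch: frame id -> physical address
    let target = allocator.alloc().map(|id| id * PAGE_SIZE).expect("no more frame");
    assert_eq!(target % PAGE_SIZE, 0);
    // dealloc_frame in the patch: physical address -> frame id
    allocator.dealloc(target / PAGE_SIZE);
}
```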
@@ -46,19 +39,15 @@ lazy_static! {
 }
 
 /// The only way to get active page table
-fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
+pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
     ACTIVE_TABLE.lock()
 }
 
-pub fn frame_allocator() -> BitAllocGuard {
-    BitAllocGuard(FRAME_ALLOCATOR.lock())
-}
-
 // Return true to continue, false to halt
-pub fn page_fault_handler(addr: VirtAddr) -> bool {
+pub fn page_fault_handler(addr: usize) -> bool {
     // Handle copy on write
     unsafe { ACTIVE_TABLE.force_unlock(); }
-    active_table().page_fault_handler(addr, || alloc_frame().start_address().as_u64() as usize)
+    active_table().page_fault_handler(addr, || alloc_frame().unwrap())
 }
 
 pub fn init(boot_info: BootInformation) -> MemorySet {
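The handler now works on raw `usize` addresses and hands the COW layer a closure that panics if frames run out (`alloc_frame().unwrap()`); the `force_unlock` is there because the fault can arrive while `ACTIVE_TABLE` is already locked. A toy sketch of the assumed callback shape (the names and the fault test are made up, not the crate's code):

```rust
// Assumed shape of the CowExt entry point this hunk calls into; the
// handler only asks for a fresh frame when it actually has to copy a
// shared page.
fn page_fault_handler(addr: usize, alloc_frame: impl FnOnce() -> usize) -> bool {
    let is_cow_page = addr < 0x8000_0000; // stand-in for the real page-table check
    if is_cow_page {
        let new_frame = alloc_frame(); // `|| alloc_frame().unwrap()` in the patch
        println!("copy page at {:#x} into frame {:#x}", addr, new_frame);
        true // recovered: continue execution
    } else {
        false // not ours: halt
    }
}

fn main() {
    assert!(page_fault_handler(0x1000, || 0x23000));
}
```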
@@ -75,6 +64,7 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
     unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
 
     *STACK_ALLOCATOR.lock() = Some({
+        use ucore_memory::Page;
         let stack_alloc_range = Page::range_of(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE,
                                                KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE + 0x1000000);
         stack_allocator::StackAllocator::new(stack_alloc_range)
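`Page` now comes from the `ucore_memory` crate instead of the deleted local `address` module, hence the block-local import. For orientation, this is the page-range arithmetic `Page::range_of(start, end)` is assumed to perform (an illustrative reimplementation, not the crate's code):

```rust
const PAGE_SIZE: usize = 1 << 12;

// Every page overlapping [start, end), by page number.
fn page_range(start: usize, end: usize) -> std::ops::Range<usize> {
    (start / PAGE_SIZE)..((end + PAGE_SIZE - 1) / PAGE_SIZE)
}

fn main() {
    // 0x1000..0x3001 touches pages 1, 2 and 3
    assert_eq!(page_range(0x1000, 0x3001), 1..4);
}
```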
@@ -83,20 +73,6 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
     kernel_memory
 }
 
-pub struct BitAllocGuard(MutexGuard<'static, BitAlloc64K>);
-
-impl FrameAllocator<Size4KiB> for BitAllocGuard {
-    fn alloc(&mut self) -> Option<Frame> {
-        self.0.alloc().map(|x| Frame::of_addr(x * PAGE_SIZE))
-    }
-}
-
-impl FrameDeallocator<Size4KiB> for BitAllocGuard {
-    fn dealloc(&mut self, frame: Frame) {
-        self.0.dealloc(frame.start_address().as_u64() as usize / PAGE_SIZE);
-    }
-}
-
 fn init_frame_allocator(boot_info: &BootInformation) {
     let memory_areas = boot_info.memory_map_tag().expect("Memory map tag required")
         .memory_areas();
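With `alloc_frame`/`dealloc_frame` returning plain addresses and paging routed through `ucore_memory`, nothing implements the `x86_64` crate's `FrameAllocator`/`FrameDeallocator` traits anymore, so the `BitAllocGuard` adapter is dead code. A toy model of the pattern being removed, using a `std` mutex in place of `spin` (illustrative only): the newtype existed just to carry the `MutexGuard` into those traits, and locking at the call site does the same job.

```rust
use std::sync::Mutex;

// Toy bitmap behind a mutex, locked directly where it is used.
static FRAME_ALLOCATOR: Mutex<Vec<bool>> = Mutex::new(Vec::new());

fn alloc_id() -> Option<usize> {
    let mut bitmap = FRAME_ALLOCATOR.lock().unwrap();
    let id = bitmap.iter().position(|&u| !u)?;
    bitmap[id] = true;
    Some(id)
}

fn main() {
    FRAME_ALLOCATOR.lock().unwrap().resize(8, false);
    assert_eq!(alloc_id(), Some(0));
}
```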
@@ -132,72 +108,61 @@ fn init_frame_allocator(boot_info: &BootInformation) {
 }
 
 fn remap_the_kernel(boot_info: BootInformation) -> MemorySet {
-    let mut memory_set = MemorySet::from(boot_info.elf_sections_tag().unwrap());
+    extern { fn stack_bottom(); }
+    let stack_bottom = stack_bottom as usize + KERNEL_OFFSET;
+    let kstack = Stack {
+        top: stack_bottom + 8 * PAGE_SIZE,
+        bottom: stack_bottom + 1 * PAGE_SIZE,
+    };
+    let mut memory_set = memory_set_from(boot_info.elf_sections_tag().unwrap(), kstack);
 
     use consts::{KERNEL_OFFSET, KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
-    memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xb8000, KERNEL_OFFSET + 0xb9000, MemoryAttr::default(), "VGA"));
-    memory_set.push(MemoryArea::new_kernel(KERNEL_OFFSET + 0xfee00000, KERNEL_OFFSET + 0xfee01000, MemoryAttr::default(), "LAPIC"));
+    memory_set.push(MemoryArea::new_physical(0xb8000, 0xb9000, KERNEL_OFFSET, MemoryAttr::default(), "VGA"));
+    memory_set.push(MemoryArea::new_physical(0xfee00000, 0xfee01000, KERNEL_OFFSET, MemoryAttr::default(), "LAPIC"));
     memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
     debug!("{:#x?}", memory_set);
 
-    memory_set.switch();
+    unsafe { memory_set.activate(); }
     info!("NEW TABLE!!!");
 
-    let kstack = get_init_kstack_and_set_guard_page();
-    memory_set.set_kstack(kstack);
-
-    memory_set
-}
-
-fn get_init_kstack_and_set_guard_page() -> Stack {
-    assert_has_not_been_called!();
-
-    extern { fn stack_bottom(); }
-    let stack_bottom = PhysAddr::new(stack_bottom as u64).to_kernel_virtual();
-
     // turn the stack bottom into a guard page
     active_table().unmap(stack_bottom);
     debug!("guard page at {:?}", stack_bottom);
 
-    Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
+    memory_set
 }
 
-impl From<ElfSectionsTag> for MemorySet {
-    fn from(sections: ElfSectionsTag) -> Self {
-        assert_has_not_been_called!();
-        // WARNING: must ensure it's large enough
-        static mut SPACE: [u8; 0x1000] = [0; 0x1000];
-        let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE) };
-        for section in sections.sections().filter(|s| s.is_allocated()) {
-            set.push(MemoryArea::from(section));
-        }
-        set
-    }
-}
+fn memory_set_from(sections: ElfSectionsTag, kstack: Stack) -> MemorySet {
+    assert_has_not_been_called!();
+    // WARNING: must ensure it's large enough
+    static mut SPACE: [u8; 0x1000] = [0; 0x1000];
+    let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
+    for section in sections.sections().filter(|s| s.is_allocated()) {
+        set.push(memory_area_from(section));
+    }
+    set
+}
 
-impl From<ElfSection> for MemoryArea {
-    fn from(section: ElfSection) -> Self {
-        let mut start_addr = section.start_address() as usize;
-        let mut end_addr = section.end_address() as usize;
-        assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
-        let name = unsafe { &*(section.name() as *const str) };
-        if start_addr < KERNEL_OFFSET {
-            start_addr += KERNEL_OFFSET;
-            end_addr += KERNEL_OFFSET;
-        }
-        MemoryArea::new_kernel(start_addr, end_addr, MemoryAttr::from(section.flags()), name)
-    }
-}
+fn memory_area_from(section: ElfSection) -> MemoryArea {
+    let mut start_addr = section.start_address() as usize;
+    let mut end_addr = section.end_address() as usize;
+    assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
+    let name = unsafe { &*(section.name() as *const str) };
+    if start_addr >= KERNEL_OFFSET {
+        start_addr -= KERNEL_OFFSET;
+        end_addr -= KERNEL_OFFSET;
+    }
+    MemoryArea::new_physical(start_addr, end_addr, KERNEL_OFFSET, memory_attr_from(section.flags()), name)
+}
 
-impl From<ElfSectionFlags> for MemoryAttr {
-    fn from(elf_flags: ElfSectionFlags) -> Self {
-        let mut flags = MemoryAttr::default();
-
-        if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
-        if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
-        if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
-        flags
-    }
-}
+fn memory_attr_from(elf_flags: ElfSectionFlags) -> MemoryAttr {
+    let mut flags = MemoryAttr::default();
+
+    if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
+    if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
+    if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
+    flags
+}
 
 pub mod test {
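One detail worth tracing in the last hunk: the kernel stack is now described up front as a `Stack { top, bottom }` around the linker symbol `stack_bottom`, and the lowest page is unmapped afterwards as a guard page. The layout arithmetic, checked in isolation (the address is a stand-in for the linker symbol):

```rust
const PAGE_SIZE: usize = 1 << 12;

fn main() {
    let stack_bottom = 0x200000;               // any page-aligned address
    let bottom = stack_bottom + 1 * PAGE_SIZE; // first usable byte, above the guard
    let top = stack_bottom + 8 * PAGE_SIZE;    // the stack grows down from here
    assert_eq!((top - bottom) / PAGE_SIZE, 7); // 7 usable pages
    assert!(bottom > stack_bottom);            // the guard page sits below the stack
}
```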