Allocate kernel stack from heap, remove stack allocator, remove guard page.

toolchain_update
WangRunji 7 years ago
parent 81ff6f13e5
commit 95ab3a2f3b
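The gist, assembled from the hunks below: kernel stacks no longer come from a dedicated StackAllocator region with a guard page underneath; each one is now a fixed-size, page-aligned Box allocation from the kernel heap. At every call site the size parameter disappears. A before/after sketch in Rust:

    // Before: the caller picked a page count, and a StackAllocator carved
    // the stack (plus a guard page) out of a reserved virtual region.
    let kstack = T::alloc_stack(7);

    // After: a fixed 8-page (32 KB) stack, boxed on the kernel heap,
    // with no guard page below it.
    let kstack = T::alloc_stack();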

@@ -14,7 +14,7 @@ pub trait InactivePageTable {
     fn alloc_frame() -> Option<PhysAddr>;
     fn dealloc_frame(target: PhysAddr);
-    fn alloc_stack(size_in_pages: usize) -> Stack;
+    fn alloc_stack() -> Stack;
 }
 /// A contiguous region of memory with uniform access permissions
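Because alloc_stack() is an associated function (no self), MemorySet<T> can allocate its kernel stack generically. A minimal sketch of the pattern, assuming only what this hunk shows:

    // Dispatches to the architecture's implementation, which after this
    // commit just boxes a fixed-size buffer on the heap.
    fn make_kstack<T: InactivePageTable>() -> Stack {
        T::alloc_stack()
    }
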
@@ -139,7 +139,7 @@ impl<T: InactivePageTable> MemorySet<T> {
         MemorySet {
             areas: Vec::<MemoryArea>::new(),
             page_table: T::new(),
-            kstack: T::alloc_stack(7),
+            kstack: T::alloc_stack(),
         }
     }
     /// Used for remap_kernel() where heap alloc is unavailable
@@ -184,7 +184,7 @@ impl<T: InactivePageTable> MemorySet<T> {
         MemorySet {
             areas: self.areas.clone(),
             page_table,
-            kstack: T::alloc_stack(7),
+            kstack: T::alloc_stack(),
         }
     }
     pub fn clear(&mut self) {

@@ -1 +1 @@
-Subproject commit 48c7d74041c38e4f230df02075ad7519c1bf816d
+Subproject commit 08d4a2e96399cef5985a85a035e1ac222f8d5bef

@@ -33,6 +33,7 @@ fn init_frame_allocator() {
 }

 fn remap_the_kernel() {
+    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
     let kstack = Stack {
         top: bootstacktop as usize,
         bottom: bootstack as usize + PAGE_SIZE,
@@ -43,6 +44,7 @@ fn remap_the_kernel() {
     ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
     ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
     ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
+    ms.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
     unsafe { ms.activate(); }
     info!("kernel remap end");
 }
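The new "kernel_heap" area is what lets heap-backed stacks survive the switch to the new page table: every boxed stack lives inside this range, so it must be mapped before ms.activate(). With the RISC-V constants from the consts.rs hunk further down (KERNEL_HEAP_END is an illustrative name, not from the source):

    const KERNEL_HEAP_OFFSET: usize = 0x8050_0000;
    const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
    // So the "kernel_heap" area spans [0x8050_0000, 0x8060_0000).
    const KERNEL_HEAP_END: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;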

@@ -202,8 +202,8 @@ impl InactivePageTable for InactivePageTable0 {
         dealloc_frame(target)
     }
-    fn alloc_stack(size_in_pages: usize) -> Stack {
-        alloc_stack(size_in_pages)
+    fn alloc_stack() -> Stack {
+        alloc_stack()
     }
 }

@@ -1,5 +1,6 @@
 global start
 global stack_bottom
+global stack_top
 extern long_mode_start

 section .text

@@ -54,10 +54,10 @@ fn init_frame_allocator(boot_info: &BootInformation) {
 fn remap_the_kernel(boot_info: &BootInformation) -> MemorySet {
     extern { fn stack_bottom(); }
-    let stack_bottom = stack_bottom as usize + KERNEL_OFFSET;
+    extern { fn stack_top(); }
     let kstack = Stack {
-        top: stack_bottom + 8 * PAGE_SIZE,
-        bottom: stack_bottom + 1 * PAGE_SIZE,
+        top: stack_top as usize + KERNEL_OFFSET,
+        bottom: stack_bottom as usize + PAGE_SIZE + KERNEL_OFFSET,
     };
     let mut memory_set = memory_set_from(boot_info.elf_sections_tag().unwrap(), kstack);
@@ -76,10 +76,6 @@ fn remap_the_kernel(boot_info: &BootInformation) -> MemorySet {
     unsafe { memory_set.activate(); }
     info!("NEW TABLE!!!");
-    // turn the stack bottom into a guard page
-    active_table().unmap(stack_bottom);
-    debug!("guard page at {:x?}", stack_bottom);
     memory_set
 }
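The lines deleted at the end were the guard-page trick: the page at stack_bottom was deliberately left unmapped, so running off the bottom of the boot stack faulted immediately instead of silently overwriting whatever sat below it. Heap-allocated stacks leave no room for such a hole, so the protection is dropped along with the allocator. A hypothetical helper showing what was lost (assuming only the active_table().unmap() API visible in the removed code):

    // Hypothetical: turn the lowest page of a stack into a guard page.
    // This mirrors the removed code; it cannot be applied to Box-allocated
    // stacks without also teaching the heap allocator to skip the hole.
    fn install_guard_page(stack_bottom: usize) {
        active_table().unmap(stack_bottom);
        debug!("guard page at {:x?}", stack_bottom);
    }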

@@ -1,20 +1,17 @@
-use ucore_memory::PAGE_SIZE;
+use bit_allocator::{BitAlloc, BitAlloc64K};
+// Depends on kernel
+use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
+use spin::{Mutex, MutexGuard};
+use ucore_memory::cow::CowExt;
-use ucore_memory::paging::*;
 use ucore_memory::memory_set::*;
+use ucore_memory::PAGE_SIZE;
+use ucore_memory::paging::*;
 use x86_64::instructions::tlb;
+use x86_64::PhysAddr;
 use x86_64::registers::control::{Cr3, Cr3Flags};
 use x86_64::structures::paging::{Mapper, PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
 use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
 use x86_64::ux::u9;
-use x86_64::PhysAddr;
-use bit_allocator::{BitAlloc, BitAlloc64K};
-use spin::{Mutex, MutexGuard};
-// Depends on kernel
-use memory::{active_table, alloc_frame, dealloc_frame, alloc_stack};

 pub trait PageExt {
     fn of_addr(address: usize) -> Self;
@@ -224,8 +221,8 @@ impl InactivePageTable for InactivePageTable0 {
         dealloc_frame(target)
     }
-    fn alloc_stack(size_in_pages: usize) -> Stack {
-        alloc_stack(size_in_pages)
+    fn alloc_stack() -> Stack {
+        alloc_stack()
     }
 }

@@ -19,10 +19,6 @@ mod riscv {
     pub const KERNEL_PML4: usize = 0x8040_0000 >> 22;
     pub const KERNEL_HEAP_OFFSET: usize = 0x8050_0000;
     pub const KERNEL_HEAP_SIZE: usize = 1 * 1024 * 1024; // 1 MB
-    pub const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
-    pub const KERNEL_STACK_SIZE: usize = 64 * 1024; // 64 KB
     pub const MEMORY_OFFSET: usize = 0x8000_0000;
     pub const MEMORY_END: usize = 0x8080_0000;
 }
@@ -56,9 +52,6 @@ mod x86_64 {
     /// Size of kernel heap
     pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
-    pub const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
-    pub const KERNEL_STACK_SIZE: usize = 1 * 1024 * 1024; // 1 MB
     pub const MEMORY_OFFSET: usize = 0;
     /// Offset to kernel percpu variables
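For reference, the removed constants reserved a stack window directly above the kernel heap (64 KB on RISC-V, 1 MB on x86_64); a stack that overruns its pages now marches into ordinary heap data instead. The old RISC-V layout, worked out from the values above:

    // Removed layout (RISC-V): stacks had their own window above the heap.
    // KERNEL_HEAP_OFFSET                 = 0x8050_0000
    // KERNEL_STACK_OFFSET = heap + 1 MB  = 0x8060_0000
    // end of window       = + 64 KB      = 0x8061_0000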

@@ -1,22 +1,18 @@
 pub use arch::paging::*;
 use bit_allocator::{BitAlloc, BitAlloc64K};
 use consts::MEMORY_OFFSET;
-use self::stack_allocator::*;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 use ucore_memory::{*, paging::PageTable};
-pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};
-#[cfg(target_arch = "x86_64")]
-use ucore_memory::paging::CowExt;
+use ucore_memory::cow::CowExt;
+pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};

 pub type MemorySet = MemorySet_<InactivePageTable0>;

-mod stack_allocator;
 lazy_static! {
     pub static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
 }
-pub static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);

 pub fn alloc_frame() -> Option<usize> {
     FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET)
@@ -26,10 +22,16 @@ pub fn dealloc_frame(target: usize) {
     FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
 }

-pub fn alloc_stack(size_in_pages: usize) -> Stack {
-    STACK_ALLOCATOR.lock()
-        .as_mut().expect("stack allocator is not initialized")
-        .alloc_stack(size_in_pages).expect("no more stack")
+// alloc from heap
+pub fn alloc_stack() -> Stack {
+    use alloc::boxed::Box;
+    const STACK_SIZE: usize = 8 * 4096;
+    #[repr(align(4096))]
+    struct StackData([u8; STACK_SIZE]);
+    let data = Box::new(StackData([0; STACK_SIZE]));
+    let bottom = Box::into_raw(data) as usize;
+    let top = bottom + STACK_SIZE;
+    Stack { top, bottom }
 }

 #[cfg(target_arch = "x86_64")]
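One property of the new alloc_stack() worth noting: Box::into_raw relinquishes ownership and nothing ever rebuilds the Box, so every stack is leaked for the lifetime of the kernel; dropping a MemorySet does not return its kstack to the heap. A hypothetical reclaim path, not part of this commit:

    // Hypothetical: free a stack made by alloc_stack(). StackData would
    // have to be the same 8-page, 4096-aligned type used at allocation
    // time, and stack.bottom must be exactly the pointer returned by
    // Box::into_raw.
    unsafe fn dealloc_stack(stack: &Stack) {
        drop(Box::from_raw(stack.bottom as *mut StackData));
    }
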
@@ -71,14 +73,9 @@ pub fn page_fault_handler(addr: usize) -> bool {
 }

 pub fn init_heap() {
-    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE, KERNEL_STACK_OFFSET, KERNEL_STACK_SIZE};
+    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
     unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
-    *STACK_ALLOCATOR.lock() = Some({
-        use ucore_memory::Page;
-        StackAllocator::new(Page::range_of(KERNEL_STACK_OFFSET, KERNEL_STACK_OFFSET + KERNEL_STACK_SIZE))
-    });
     info!("heap init end");
 }

 //pub mod test {
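With the STACK_ALLOCATOR bootstrap gone, init_heap() only seeds the heap allocator, but the ordering constraint tightens: MemorySet::new() now calls alloc_stack(), which allocates from the heap, so it must not run before init_heap() (the constructor commented "Used for remap_kernel() where heap alloc is unavailable" exists for exactly that window). A sketch of the assumed boot order, with hypothetical call sites:

    memory::init_heap();        // HEAP_ALLOCATOR ready
    let ms = MemorySet::new();  // ok: the kstack is boxed from the heap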

@@ -1,58 +0,0 @@
-use ucore_memory::*;
-use ucore_memory::paging::PageTable;
-use ucore_memory::memory_set::Stack;
-use memory::{alloc_frame, active_table};
-
-// TODO: use BitAllocator & alloc fixed size stack
-pub struct StackAllocator {
-    range: PageRange,
-}
-
-impl StackAllocator {
-    pub fn new(page_range: PageRange) -> StackAllocator {
-        StackAllocator { range: page_range }
-    }
-}
-
-impl StackAllocator {
-    pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option<Stack> {
-        if size_in_pages == 0 {
-            return None; /* a zero sized stack makes no sense */
-        }
-
-        // clone the range, since we only want to change it on success
-        let mut range = self.range.clone();
-
-        // try to allocate the stack pages and a guard page
-        let guard_page = range.next();
-        let stack_start = range.next();
-        let stack_end = if size_in_pages == 1 {
-            stack_start
-        } else {
-            // choose the (size_in_pages-2)th element, since index
-            // starts at 0 and we already allocated the start page
-            range.nth(size_in_pages - 2)
-        };
-
-        match (guard_page, stack_start, stack_end) {
-            (Some(_), Some(start), Some(end)) => {
-                // success! write back updated range
-                self.range = range;
-
-                // map stack pages to physical frames
-                for page in Page::range_of(start.start_address(), end.start_address() + PAGE_SIZE) {
-                    let frame = alloc_frame().unwrap();
-                    active_table().map(page.start_address(), frame);
-                }
-
-                // create a new stack
-                let top_of_stack = end.start_address() + PAGE_SIZE;
-                Some(Stack {
-                    top: top_of_stack,
-                    bottom: start.start_address(),
-                })
-            }
-            _ => None, /* not enough pages */
-        }
-    }
-}