Make FrameAllocator global!

master
WangRunji 7 years ago
parent 5707dfd00a
commit 5075abc5b0

@ -14,7 +14,7 @@ user_object_files := $(wildcard user/*.o)
qemu_opts := -cdrom $(iso) -smp 4 -serial mon:stdio
features := use_apic
link_user = 1
#link_user = 1
ifdef link_user
features := $(features) link_user_program

@ -40,8 +40,8 @@ impl Mapper {
// address must be 1GiB aligned
assert_eq!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE), 0);
return Some(Frame::of_addr(
start_frame.start_address().get() +
(page.p2_index() * ENTRY_COUNT + page.p1_index()) * PAGE_SIZE
start_frame.start_address().get() +
(page.p2_index() * ENTRY_COUNT + page.p1_index()) * PAGE_SIZE
));
}
}
@ -63,41 +63,35 @@ impl Mapper {
};
p3.and_then(|p3| p3.next_table(page.p3_index()))
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
.or_else(huge_page)
.and_then(|p2| p2.next_table(page.p2_index()))
.and_then(|p1| p1[page.p1_index()].pointed_frame())
.or_else(huge_page)
}
pub fn map_to<A>(&mut self, page: Page, frame: Frame, flags: EntryFlags,
allocator: &mut A)
where A: FrameAllocator
pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags)
{
let p4 = self.p4_mut();
let mut p3 = p4.next_table_create(page.p4_index(), allocator);
let mut p2 = p3.next_table_create(page.p3_index(), allocator);
let mut p1 = p2.next_table_create(page.p2_index(), allocator);
let mut p3 = p4.next_table_create(page.p4_index());
let mut p2 = p3.next_table_create(page.p3_index());
let mut p1 = p2.next_table_create(page.p2_index());
assert!(p1[page.p1_index()].is_unused());
// TODO: Remove USER_ACCESSIBLE
p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT | EntryFlags::USER_ACCESSIBLE);
}
pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
pub fn map(&mut self, page: Page, flags: EntryFlags)
{
let frame = allocator.allocate_frame().expect("out of memory");
self.map_to(page, frame, flags, allocator)
self.map_to(page, alloc_frame(), flags)
}
pub fn identity_map<A>(&mut self, frame: Frame, flags: EntryFlags, allocator: &mut A)
where A: FrameAllocator
pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags)
{
let page = Page::of_addr(frame.start_address().to_identity_virtual());
self.map_to(page, frame, flags, allocator)
self.map_to(page, frame, flags)
}
pub fn unmap<A>(&mut self, page: Page, allocator: &mut A)
where A: FrameAllocator
pub fn unmap(&mut self, page: Page)
{
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
@ -105,23 +99,13 @@ impl Mapper {
assert!(self.translate(page.start_address()).is_some());
let p1 = self.p4_mut()
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
.next_table_mut(page.p4_index())
.and_then(|p3| p3.next_table_mut(page.p3_index()))
.and_then(|p2| p2.next_table_mut(page.p2_index()))
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
tlb::flush(VirtualAddress(page.start_address()));
// TODO free p(1,2,3) table if empty
//allocator.deallocate_frame(frame);
}
}
use core::fmt;
use core::fmt::Debug;
/// Debug-prints the whole page-table hierarchy by delegating to the P4 table.
impl Debug for Mapper {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        // Same as `write!(f, "{:?}", self.p4())`, without the format machinery.
        self.p4().fmt(f)
    }
}

@ -171,14 +171,4 @@ impl InactivePageTable {
InactivePageTable { p4_frame: frame }
}
}
use core::fmt;
use core::fmt::Debug;
/// Debug output: a header line followed by the inner `Mapper`'s table dump.
impl Debug for ActivePageTable {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "ActivePageTable:\n")?;
        self.mapper.fmt(f)
    }
}

@ -1,6 +1,6 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::FrameAllocator;
use memory::alloc_frame;
use super::entry::*;
use super::ENTRY_COUNT;
@ -40,16 +40,12 @@ impl<L> Table<L> where L: HierarchicalLevel {
.map(|address| unsafe { &mut *(address as *mut _) })
}
pub fn next_table_create<A>(&mut self,
index: usize,
allocator: &mut A)
-> &mut Table<L::NextLevel>
where A: FrameAllocator
pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel>
{
if self.next_table(index).is_none() {
assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
"mapping code does not support huge pages");
let frame = allocator.allocate_frame().expect("no frames available");
let frame = alloc_frame();
// TODO: Remove USER_ACCESSIBLE
self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE);
self.next_table_mut(index).unwrap().zero();
@ -98,52 +94,4 @@ impl HierarchicalLevel for Level3 {
impl HierarchicalLevel for Level2 {
type NextLevel = Level1;
}
use core::fmt;
use core::fmt::Debug;
impl Debug for Table<Level4> {
    /// Prints every used P4 entry and recurses into its child table.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        for (i, e) in self.entries.iter().enumerate() {
            // Entry 511 is the recursive-mapping slot: descending into it
            // would print this same table forever, so skip it.
            if e.is_unused() || i == 511 {
                continue;
            }
            write!(f, "{:3X}: {:?}\n", i, e)?;
            write!(f, "{:?}", self.next_table(i).unwrap())?;
        }
        Ok(())
    }
}
impl Debug for Table<Level3> {
    /// Prints every used P3 entry (indented one space) and recurses into P2.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        for (i, e) in self.entries.iter().enumerate() {
            if e.is_unused() {
                continue;
            }
            write!(f, " {:3X}: {:?}\n", i, e)?;
            write!(f, "{:?}", self.next_table(i).unwrap())?;
        }
        Ok(())
    }
}
impl Debug for Table<Level2> {
    /// Prints every used P2 entry (indented two spaces) and recurses into P1.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        for (i, e) in self.entries.iter().enumerate() {
            if e.is_unused() {
                continue;
            }
            write!(f, "  {:3X}: {:?}\n", i, e)?;
            write!(f, "{:?}", self.next_table(i).unwrap())?;
        }
        Ok(())
    }
}
impl Debug for Table<Level1> {
    /// Prints every used P1 entry (indented three spaces); P1 has no children.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        for (i, e) in self.entries.iter().enumerate() {
            if e.is_unused() {
                continue;
            }
            write!(f, "   {:3X}: {:?}\n", i, e)?;
        }
        Ok(())
    }
}

@ -4,76 +4,37 @@ use memory::{Frame, FrameAllocator, VirtAddr};
pub struct TemporaryPage {
page: Page,
allocator: TinyAllocator,
}
impl TemporaryPage {
pub fn new<A>(page: Page, allocator: &mut A) -> TemporaryPage
where A: FrameAllocator
{
TemporaryPage {
page,
allocator: TinyAllocator::new(allocator),
}
pub fn new(page: Page) -> TemporaryPage {
TemporaryPage { page }
}
/// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page.
pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
pub fn map(&self, frame: Frame, active_table: &mut ActivePageTable)
-> VirtAddr
{
use super::entry::EntryFlags;
assert!(active_table.translate_page(self.page).is_none(),
"temporary page is already mapped");
active_table.map_to(self.page, frame, EntryFlags::WRITABLE, &mut self.allocator);
active_table.map_to(self.page, frame, EntryFlags::WRITABLE);
self.page.start_address()
}
/// Unmaps the temporary page in the active table.
pub fn unmap(&mut self, active_table: &mut ActivePageTable) {
active_table.unmap(self.page, &mut self.allocator)
pub fn unmap(&self, active_table: &mut ActivePageTable) {
active_table.unmap(self.page)
}
/// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&mut self,
pub fn map_table_frame(&self,
frame: Frame,
active_table: &mut ActivePageTable)
-> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
}
}
/// A fixed-capacity allocator holding at most three frames, used while
/// remapping so the main allocator need not be touched mid-operation.
struct TinyAllocator([Option<Frame>; 3]);

impl TinyAllocator {
    /// Pre-fills all three slots from `allocator`.
    fn new<A>(allocator: &mut A) -> TinyAllocator
        where A: FrameAllocator
    {
        let mut next = || allocator.allocate_frame();
        TinyAllocator([next(), next(), next()])
    }
}

impl FrameAllocator for TinyAllocator {
    /// Hands out the first frame still held, or `None` once all are gone.
    fn allocate_frame(&mut self) -> Option<Frame> {
        self.0.iter_mut()
            .find(|slot| slot.is_some())
            .and_then(|slot| slot.take())
    }

    /// Returns `frame` to the first empty slot; panics when all three are full.
    fn deallocate_frame(&mut self, frame: Frame) {
        match self.0.iter_mut().find(|slot| slot.is_none()) {
            Some(slot) => *slot = Some(frame),
            None => panic!("Tiny allocator can hold only 3 frames."),
        }
    }
}

@ -11,6 +11,9 @@ pub struct AreaFrameAllocator {
multiboot_end: Frame,
}
// This line is required, otherwise the allocator cannot be placed in a Mutex
// (a `Mutex<Option<AreaFrameAllocator>>` static needs the contents to be Send).
// NOTE(review): this asserts Send for a type whose fields are not all visible
// here — confirm AreaFrameAllocator holds no thread-affine state (e.g. Rc,
// raw pointers with aliasing requirements) before trusting this impl.
unsafe impl Send for AreaFrameAllocator {}
impl FrameAllocator for AreaFrameAllocator {
fn allocate_frame(&mut self) -> Option<Frame> {
if let Some(area) = self.current_area {

@ -1,4 +1,5 @@
use super::address::PhysAddr;
use memory::FRAME_ALLOCATOR;
pub const PAGE_SIZE: usize = 4096;

@ -0,0 +1,95 @@
use alloc::vec::Vec;
use super::*;
/// A contiguous span of virtual memory sharing one set of access permissions.
/// Corresponds to `vma_struct` in ucore.
#[derive(Debug, Eq, PartialEq)]
pub struct MemoryArea {
    pub start_addr: VirtAddr,   // inclusive lower bound
    pub end_addr: VirtAddr,     // exclusive upper bound
    pub flags: u32,             // access-permission flags
    pub name: &'static str,     // human-readable label, e.g. "code"
    pub mapped: bool,           // whether the area is currently mapped
}
impl MemoryArea {
    /// Returns true when `addr` lies inside the half-open range
    /// `[start_addr, end_addr)`.
    pub fn contains(&self, addr: VirtAddr) -> bool {
        addr >= self.start_addr && addr < self.end_addr
    }
    /// Returns true when the two half-open ranges intersect.
    fn is_overlap_with(&self, other: &MemoryArea) -> bool {
        !(self.end_addr <= other.start_addr || self.start_addr >= other.end_addr)
    }
}
/// A collection of memory areas, containing several contiguous spans.
/// Corresponds to `mm_struct` in ucore.
pub struct MemorySet {
    areas: Vec<MemoryArea>,          // disjoint areas; overlap is rejected on push
    page_table: InactivePageTable,   // page table backing this address space
}
impl MemorySet {
    /// Creates an empty set backed by a fresh inactive page table obtained
    /// from the memory controller.
    pub fn new(mc: &mut MemoryController) -> Self {
        MemorySet {
            areas: Vec::<MemoryArea>::new(),
            page_table: mc.new_page_table(),
        }
    }
    /// Returns the area containing `addr`, or `None` if no area covers it.
    pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
        self.areas.iter().find(|area| area.contains(addr))
    }
    /// Adds `area` to the set.
    ///
    /// Panics if `area` overlaps any existing area; debug-asserts that its
    /// bounds are not inverted.
    pub fn push(&mut self, area: MemoryArea) {
        debug_assert!(area.start_addr <= area.end_addr, "invalid memory area");
        // `.any()` instead of `.find(..).is_some()` — same behavior, idiomatic.
        if self.areas.iter().any(|other| area.is_overlap_with(other)) {
            panic!("memory area overlap");
        }
        self.areas.push(area);
    }
    /// Maps every area into this set's page table.
    /// NOTE(review): not yet implemented — currently a no-op stub.
    pub fn map(&mut self, mc: &mut MemoryController) {
        // TODO: mc.active_table.with(self.page_table, ...) for each area
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // NOTE(review): these tests do not compile against the definitions above:
    // `MemorySet::new` requires a `&mut MemoryController` argument but is
    // called with none, and the `MemoryArea` literals omit the `mapped` field
    // declared on the struct. They need a test-only constructor (or a
    // mockable MemoryController) before they can run.
    #[test]
    fn push_and_find() {
        let mut ms = MemorySet::new();
        ms.push(MemoryArea {
            start_addr: 0x0,
            end_addr: 0x8,
            flags: 0x0,
            name: "code",
        });
        ms.push(MemoryArea {
            start_addr: 0x8,
            end_addr: 0x10,
            flags: 0x1,
            name: "data",
        });
        // 0x6 falls inside [0x0, 0x8); 0x11 is past every area.
        assert_eq!(ms.find_area(0x6).unwrap().name, "code");
        assert_eq!(ms.find_area(0x11), None);
    }
    #[test]
    #[should_panic]
    fn push_overlap() {
        let mut ms = MemorySet::new();
        ms.push(MemoryArea {
            start_addr: 0x0,
            end_addr: 0x8,
            flags: 0x0,
            name: "code",
        });
        // [0x4, 0x10) intersects [0x0, 0x8) — push must panic.
        ms.push(MemoryArea {
            start_addr: 0x4,
            end_addr: 0x10,
            flags: 0x1,
            name: "data",
        });
    }
}

@ -3,17 +3,29 @@ pub use arch::paging::*;
pub use self::stack_allocator::Stack;
pub use self::address::*;
pub use self::frame::*;
pub use self::memory_set::*;
use multiboot2::BootInformation;
use consts::KERNEL_OFFSET;
use arch::paging;
use arch::paging::EntryFlags;
use spin::Mutex;
mod memory_set;
mod area_frame_allocator;
pub mod heap_allocator;
mod stack_allocator;
mod address;
mod frame;
pub static FRAME_ALLOCATOR: Mutex<Option<AreaFrameAllocator>> = Mutex::new(None);
/// Allocates one physical frame from the global `FRAME_ALLOCATOR`.
///
/// Panics if the allocator has not been installed yet (i.e. `memory::init`
/// has not run, so the `Option` is still `None`) or if physical memory is
/// exhausted.
pub fn alloc_frame() -> Frame {
    FRAME_ALLOCATOR.lock()
        .as_mut().expect("frame allocator is not initialized")
        .allocate_frame().expect("no more frame")
}
pub fn init(boot_info: &BootInformation) -> MemoryController {
assert_has_not_been_called!("memory::init must be called only once");
@ -39,14 +51,15 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
println!("memory area:");
for area in memory_map_tag.memory_areas() {
println!(" addr: {:#x}, size: {:#x}", area.base_addr, area.length);
}
}
let mut frame_allocator = AreaFrameAllocator::new(
*FRAME_ALLOCATOR.lock() = Some(AreaFrameAllocator::new(
kernel_start, kernel_end,
boot_info_start, boot_info_end,
memory_map_tag.memory_areas());
memory_map_tag.memory_areas()
));
let (mut active_table, kernel_stack) = remap_the_kernel(&mut frame_allocator, boot_info);
let (mut active_table, kernel_stack) = remap_the_kernel(boot_info);
use self::paging::Page;
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
@ -55,7 +68,7 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
let heap_end_page = Page::of_addr(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE-1);
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, EntryFlags::WRITABLE, &mut frame_allocator);
active_table.map(page, EntryFlags::WRITABLE);
}
let stack_allocator = {
@ -69,22 +82,17 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
MemoryController {
kernel_stack: Some(kernel_stack),
active_table,
frame_allocator,
stack_allocator,
}
}
pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
-> (ActivePageTable, Stack)
where A: FrameAllocator
pub fn remap_the_kernel(boot_info: &BootInformation) -> (ActivePageTable, Stack)
{
let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe), allocator);
let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe));
let mut active_table = unsafe { ActivePageTable::new() };
let mut new_table = {
let frame = allocator.allocate_frame().expect("no more frames");
InactivePageTable::new(frame, &mut active_table, &mut temporary_page)
};
let mut new_table =
InactivePageTable::new(alloc_frame(), &mut active_table, &mut temporary_page);
active_table.with(&mut new_table, &mut temporary_page, |mapper| {
let elf_sections_tag = boot_info.elf_sections_tag()
@ -113,19 +121,19 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
for frame in Frame::range_inclusive(start_frame, end_frame) {
let page = Page::of_addr(frame.start_address().to_kernel_virtual());
mapper.map_to(page, frame, flags, allocator);
mapper.map_to(page, frame, flags);
}
}
// identity map the VGA text buffer
let vga_buffer_frame = Frame::of_addr(0xb8000);
mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE);
// identity map the multiboot info structure
let multiboot_start = Frame::of_addr(boot_info.start_address());
let multiboot_end = Frame::of_addr(boot_info.end_address() - 1);
for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
mapper.identity_map(frame, EntryFlags::PRESENT);
}
});
@ -136,7 +144,7 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
extern { fn stack_bottom(); }
let stack_bottom = PhysAddr(stack_bottom as u64).to_kernel_virtual();
let stack_bottom_page = Page::of_addr(stack_bottom);
active_table.unmap(stack_bottom_page, allocator);
active_table.unmap(stack_bottom_page);
let kernel_stack = Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE);
println!("guard page at {:#x}", stack_bottom_page.start_address());
@ -146,7 +154,6 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
pub struct MemoryController {
pub kernel_stack: Option<Stack>,
active_table: paging::ActivePageTable,
frame_allocator: AreaFrameAllocator,
stack_allocator: stack_allocator::StackAllocator,
}
@ -154,23 +161,24 @@ impl MemoryController {
pub fn alloc_stack(&mut self, size_in_pages: usize) -> Option<Stack> {
let &mut MemoryController { ref mut kernel_stack,
ref mut active_table,
ref mut frame_allocator,
ref mut stack_allocator } = self;
stack_allocator.alloc_stack(active_table, frame_allocator,
size_in_pages)
stack_allocator.alloc_stack(active_table, size_in_pages)
}
pub fn new_page_table(&mut self) -> InactivePageTable {
let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe));
let frame = alloc_frame();
let page_table = InactivePageTable::new(frame, &mut self.active_table, &mut temporary_page);
page_table
}
pub fn map_page_identity(&mut self, addr: usize) {
let frame = Frame::of_addr(addr);
let flags = EntryFlags::WRITABLE;
self.active_table.identity_map(frame, flags, &mut self.frame_allocator);
self.active_table.identity_map(frame, flags);
}
pub fn map_page_p2v(&mut self, addr: PhysAddr) {
let page = Page::of_addr(addr.to_kernel_virtual());
let frame = Frame::of_addr(addr.get());
let flags = EntryFlags::WRITABLE;
self.active_table.map_to(page, frame, flags, &mut self.frame_allocator);
}
pub fn print_page_table(&self) {
debug!("{:?}", self.active_table);
self.active_table.map_to(page, frame, flags);
}
}

@ -1,5 +1,5 @@
use memory::paging::{Page, PageIter, ActivePageTable, EntryFlags};
use memory::{PAGE_SIZE, FrameAllocator};
use memory::PAGE_SIZE;
pub struct StackAllocator {
range: PageIter,
@ -12,11 +12,8 @@ impl StackAllocator {
}
impl StackAllocator {
pub fn alloc_stack<FA: FrameAllocator>(&mut self,
active_table: &mut ActivePageTable,
frame_allocator: &mut FA,
size_in_pages: usize)
-> Option<Stack> {
pub fn alloc_stack(&mut self, active_table: &mut ActivePageTable,
size_in_pages: usize) -> Option<Stack> {
if size_in_pages == 0 {
return None; /* a zero sized stack makes no sense */
}
@ -42,7 +39,7 @@ impl StackAllocator {
// map stack pages to physical frames
for page in Page::range_inclusive(start, end) {
active_table.map(page, EntryFlags::WRITABLE, frame_allocator);
active_table.map(page, EntryFlags::WRITABLE);
}
// create a new stack

Loading…
Cancel
Save