Use x86_64 paging! Remove original paging mod.

master · WangRunji · 7 years ago · parent 60ed3a2ed4 · commit f4b9308f2c
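Below, the hand-rolled Entry/Table/Mapper implementation is deleted and paging goes through the x86_64 crate instead (ActivePageTable becomes an alias for RecursivePageTable<'static>). A minimal sketch of the call pattern the commit adopts, assuming the ActivePageTable alias and the frame_allocator() guard defined later in this diff; the function names, flags and addresses here are illustrative, not part of the commit:

use x86_64::structures::paging::{Mapper, Page, PageTableFlags, PhysFrame, Size4KiB};
use x86_64::{PhysAddr, VirtAddr};

fn map_one_page(active_table: &mut ActivePageTable, va: u64, pa: u64) {
    let page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(va));
    let frame: PhysFrame<Size4KiB> = PhysFrame::containing_address(PhysAddr::new(pa));
    let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
    // map_to takes a FrameAllocator for intermediate tables and returns a TLB flush handle
    active_table.map_to(page, frame, flags, &mut frame_allocator()).unwrap().flush();
}

fn unmap_one_page(active_table: &mut ActivePageTable, va: u64) -> PhysFrame<Size4KiB> {
    // unmap returns the unmapped frame together with a flush handle
    let (frame, flush) = active_table.unmap(Page::containing_address(VirtAddr::new(va))).unwrap();
    flush.flush();
    frame
}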

@@ -1,51 +0,0 @@
use memory::Frame;

#[derive(Copy, Clone)]
pub struct Entry(u64);

impl Entry {
    pub fn is_unused(&self) -> bool {
        self.0 == 0
    }
    pub fn set_unused(&mut self) {
        self.0 = 0;
    }
    pub fn flags(&self) -> EntryFlags {
        EntryFlags::from_bits_truncate(self.0)
    }
    pub fn pointed_frame(&self) -> Option<Frame> {
        if self.flags().contains(EntryFlags::PRESENT) {
            Some(Frame::of_addr(
                self.0 as usize & 0x000fffff_fffff000
            ))
        } else {
            None
        }
    }
    pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
        assert_eq!(frame.start_address().as_u64() & !0x000fffff_fffff000, 0);
        self.0 = (frame.start_address().as_u64()) | flags.bits();
    }
}

bitflags! {
    pub struct EntryFlags: u64 {
        const PRESENT = 1 << 0;
        const WRITABLE = 1 << 1;
        const USER_ACCESSIBLE = 1 << 2;
        const WRITE_THROUGH = 1 << 3;
        const NO_CACHE = 1 << 4;
        const ACCESSED = 1 << 5;
        const DIRTY = 1 << 6;
        const HUGE_PAGE = 1 << 7;
        const GLOBAL = 1 << 8;
        const NO_EXECUTE = 1 << 63;
        // Types at bit 9-11
        const SHARED = 1 << 9;
        const COW = 2 << 9;
    }
}
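For reference, the removed Entry packs a frame's physical address into bits 12..51 of the u64 and the flag bits around it; a small round-trip illustration using the Entry, EntryFlags and Frame types removed in this commit (the function name and values are mine, chosen for illustration):

fn entry_round_trip() {
    let mut entry = Entry(0);
    entry.set(Frame::of_addr(0x5000), EntryFlags::PRESENT | EntryFlags::WRITABLE);
    assert_eq!(entry.0, 0x5003); // 0x5000 | PRESENT | WRITABLE
    assert_eq!(entry.pointed_frame(), Some(Frame::of_addr(0x5000)));
    assert!(entry.flags().contains(EntryFlags::WRITABLE));
}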

@@ -1,115 +0,0 @@
use core::ptr::Unique;
use memory::*;
use super::{ENTRY_COUNT, EntryFlags, Page};
use super::table::{self, Level1, Level4, Table};

pub struct Mapper {
    p4: Unique<Table<Level4>>,
}

impl Mapper {
    pub const unsafe fn new() -> Mapper {
        Mapper {
            p4: Unique::new_unchecked(table::P4),
        }
    }

    pub fn p4(&self) -> &Table<Level4> {
        unsafe { self.p4.as_ref() }
    }

    pub fn p4_mut(&mut self) -> &mut Table<Level4> {
        unsafe { self.p4.as_mut() }
    }

    pub fn translate(&self, virtual_address: VirtAddr) -> Option<PhysAddr> {
        let offset = virtual_address % PAGE_SIZE;
        self.translate_page(Page::of_addr(virtual_address))
            .map(|frame| PhysAddr::new((frame.start_address().get() + offset) as u64))
    }

    pub fn translate_page(&self, page: Page) -> Option<Frame> {
        let p3 = self.p4().next_table(page.p4_index());

        let huge_page = || {
            p3.and_then(|p3| {
                let p3_entry = &p3[page.p3_index()];
                // 1GiB page?
                if let Some(start_frame) = p3_entry.pointed_frame() {
                    if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
                        // address must be 1GiB aligned
                        assert_eq!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE), 0);
                        return Some(Frame::of_addr(
                            start_frame.start_address().get() +
                            (page.p2_index() * ENTRY_COUNT + page.p1_index()) * PAGE_SIZE
                        ));
                    }
                }
                if let Some(p2) = p3.next_table(page.p3_index()) {
                    let p2_entry = &p2[page.p2_index()];
                    // 2MiB page?
                    if let Some(start_frame) = p2_entry.pointed_frame() {
                        if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
                            // address must be 2MiB aligned
                            assert_eq!(start_frame.start_address().get() % ENTRY_COUNT, 0);
                            return Some(Frame::of_addr(
                                start_frame.start_address().get() + page.p1_index() * PAGE_SIZE
                            ));
                        }
                    }
                }
                None
            })
        };

        p3.and_then(|p3| p3.next_table(page.p3_index()))
            .and_then(|p2| p2.next_table(page.p2_index()))
            .and_then(|p1| p1[page.p1_index()].pointed_frame())
            .or_else(huge_page)
    }

    pub(super) fn entry_mut(&mut self, page: Page) -> &mut Entry {
        use core::ops::IndexMut;
        let p4 = self.p4_mut();
        let p3 = p4.next_table_create(page.p4_index());
        let p2 = p3.next_table_create(page.p3_index());
        let p1 = p2.next_table_create(page.p2_index());
        p1.index_mut(page.p1_index())
    }

    pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) {
        let entry = self.entry_mut(page);
        assert!(entry.is_unused());
        entry.set(frame, flags | EntryFlags::PRESENT);
    }

    pub fn map(&mut self, page: Page, flags: EntryFlags) {
        self.map_to(page, alloc_frame(), flags)
    }

    pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags) {
        let page = Page::of_addr(frame.start_address().to_identity_virtual());
        self.map_to(page, frame, flags)
    }

    pub fn unmap(&mut self, page: Page) -> Frame {
        use x86_64::instructions::tlb;
        use x86_64::VirtAddr;
        assert!(self.translate(page.start_address()).is_some());
        let p1 = self.p4_mut()
            .next_table_mut(page.p4_index())
            .and_then(|p3| p3.next_table_mut(page.p3_index()))
            .and_then(|p2| p2.next_table_mut(page.p2_index()))
            .expect("mapping code does not support huge pages");
        let frame = p1[page.p1_index()].pointed_frame().unwrap();
        p1[page.p1_index()].set_unused();
        tlb::flush(VirtAddr::new(page.start_address() as u64));
        // TODO free p(1,2,3) table if empty
        frame
    }
}
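The translate path above walks P4 → P3 → P2 → P1 and short-circuits when it meets a HUGE_PAGE entry at the P3 (1 GiB) or P2 (2 MiB) level. A sketch of the index arithmetic it relies on; the helper name is illustrative and assumes 4 KiB pages with 512-entry tables:

fn indices_of(addr: usize) -> (usize, usize, usize, usize) {
    // Mirrors Page::of_addr plus the p4_index..p1_index helpers (PAGE_SIZE = 4096).
    let number = addr / 4096;
    let p1 = (number >> 0) & 0o777;   // address bits 12..20
    let p2 = (number >> 9) & 0o777;   // address bits 21..29
    let p3 = (number >> 18) & 0o777;  // address bits 30..38
    let p4 = (number >> 27) & 0o777;  // address bits 39..47
    (p4, p3, p2, p1)
}
// A 2 MiB huge page found at the P2 level covers ENTRY_COUNT (512) 4 KiB frames, so
// translate_page adds p1 * PAGE_SIZE to its start address; a 1 GiB page at the P3
// level covers 512 * 512 frames and adds (p2 * 512 + p1) * PAGE_SIZE.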

@@ -1,163 +1,80 @@
use core::ops::{Add, Deref, DerefMut};
use memory::*;
pub use self::cow::*;
pub use self::entry::*;
pub use self::mapper::Mapper;
pub use self::temporary_page::TemporaryPage;
//pub use self::cow::*;
use x86_64::structures::paging::*;
use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::instructions::tlb;
use x86_64::ux::u9;
mod entry;
mod table;
mod temporary_page;
mod mapper;
mod cow;
pub type Frame = PhysFrame;
pub type EntryFlags = PageTableFlags;
pub type ActivePageTable = RecursivePageTable<'static>;
const ENTRY_COUNT: usize = 512;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Page {
number: usize,
}
pub use x86_64::structures::paging::{Page, PageRange, Mapper, FrameAllocator, FrameDeallocator, Size4KiB, PageTable};
impl Page {
pub fn of_addr(address: VirtAddr) -> Page {
assert!(address < 0x0000_8000_0000_0000 ||
address >= 0xffff_8000_0000_0000,
"invalid address: 0x{:x}", address);
Page { number: address / PAGE_SIZE }
}
pub fn start_address(&self) -> usize {
self.number * PAGE_SIZE
}
fn p4_index(&self) -> usize {
(self.number >> 27) & 0o777
}
fn p3_index(&self) -> usize {
(self.number >> 18) & 0o777
}
fn p2_index(&self) -> usize {
(self.number >> 9) & 0o777
}
fn p1_index(&self) -> usize {
(self.number >> 0) & 0o777
}
//mod cow;
pub fn range_inclusive(start: Page, end: Page) -> PageRange {
PageRange {
start,
end,
}
}
const ENTRY_COUNT: usize = 512;
/// Iterate pages of address [begin, end)
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
PageRange {
start: Page::of_addr(begin),
end: Page::of_addr(end - 1),
}
}
pub trait PageExt {
fn of_addr(address: VirtAddr) -> Self;
fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange;
}
impl Add<usize> for Page {
type Output = Page;
fn add(self, rhs: usize) -> Page {
Page { number: self.number + rhs }
impl PageExt for Page {
fn of_addr(address: usize) -> Self {
use x86_64;
Page::containing_address(x86_64::VirtAddr::new(address as u64))
}
}
#[derive(Clone)]
pub struct PageRange {
start: Page,
end: Page,
}
impl Iterator for PageRange {
type Item = Page;
fn next(&mut self) -> Option<Page> {
if self.start <= self.end {
let page = self.start;
self.start.number += 1;
Some(page)
} else {
None
}
fn range_of(begin: usize, end: usize) -> PageRange<Size4KiB> {
Page::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
pub struct ActivePageTable {
mapper: Mapper,
pub trait FrameExt {
fn of_addr(address: usize) -> Self;
}
impl Deref for ActivePageTable {
type Target = Mapper;
fn deref(&self) -> &Mapper {
&self.mapper
impl FrameExt for Frame {
fn of_addr(address: usize) -> Self {
Frame::containing_address(PhysAddr::new(address as u64))
}
}
impl DerefMut for ActivePageTable {
fn deref_mut(&mut self) -> &mut Mapper {
&mut self.mapper
}
pub trait ActiveTableExt {
fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable));
fn map_to_(&mut self, page: Page, frame: Frame, flags: EntryFlags);
}
impl ActivePageTable {
pub const unsafe fn new() -> ActivePageTable {
ActivePageTable {
mapper: Mapper::new(),
}
}
pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut Mapper))
{
use x86_64::instructions::tlb;
use x86_64::registers::control;
let temporary_page = TemporaryPage::new();
{
let backup = Frame::of_addr(control::Cr3::read().0.start_address().get());
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
impl ActiveTableExt for ActivePageTable {
fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
self.p4_mut()[511].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
p4_table[0o777].set_frame(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
tlb::flush_all();
// execute f in the new context
f(self);
f(active_table);
// restore recursive mapping to original p4 table
p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
p4_table[0o777] = backup;
tlb::flush_all();
});
}
fn map_to_(&mut self, page: Page<Size4KiB>, frame: PhysFrame<Size4KiB>, flags: EntryFlags) {
self.map_to(page, frame, flags, &mut frame_allocator()).unwrap().flush();
// Set user bit for p1-p4 entry
// It's a workaround since x86_64 PageTable do not set user bit.
if flags.contains(EntryFlags::USER_ACCESSIBLE) {
let mut addr = page.start_address().as_u64();
for _ in 0..4 {
addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
// set USER_ACCESSIBLE
unsafe { (*(addr as *mut EntryFlags)).insert(EntryFlags::USER_ACCESSIBLE) };
}
}
temporary_page.unmap(self);
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86_64::structures::paging::PhysFrame;
use x86_64::registers::control::{Cr3, Cr3Flags};
debug!("switch table {:?} -> {:?}", Frame::of_addr(Cr3::read().0.start_address().get()), new_table.p4_frame);
if new_table.p4_frame.start_address() == Cr3::read().0.start_address() {
return new_table;
}
let old_table = InactivePageTable {
p4_frame: Frame::of_addr(Cr3::read().0.start_address().get()),
};
unsafe {
Cr3::write(PhysFrame::containing_address(new_table.p4_frame.start_address()),
Cr3Flags::empty());
}
use core::mem::forget;
forget(new_table);
old_table
}
}
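The USER_ACCESSIBLE workaround in map_to_ above leans on the recursive entry in P4 slot 511: shifting a virtual address right by 9 bits and re-basing it into the recursive region yields the virtual address of the page-table entry that maps it, and repeating the step climbs one table level each time. A sketch of that arithmetic; the helper name is mine, not part of the commit:

// Illustration of the address transform used by the `for _ in 0..4` loop above.
fn entry_address(addr: u64) -> u64 {
    // Drop the 12-bit page offset down to a 3-bit entry offset (entries are 8 bytes),
    // keep the nine-bit table indices, and re-base into the recursive mapping
    // behind P4 slot 511 (0xffffff80_00000000).
    ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000
}
// entry_address(page_va)                -> virtual address of the page's P1 entry
// entry_address(entry_address(page_va)) -> the corresponding P2 entry, and so on up to P4.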
@@ -168,19 +85,24 @@ pub struct InactivePageTable {
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
let temporary_page = TemporaryPage::new();
{
let table = temporary_page.map_table_frame(frame.clone(),
active_table);
// now we are able to zero the table
with_temporary_map(active_table, &frame, |_, table: &mut PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
}
temporary_page.unmap(active_table);
table[511].set_frame(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
});
InactivePageTable { p4_frame: frame }
}
pub fn switch(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
unsafe { Cr3::write(new_frame, Cr3Flags::empty()); }
}
}
pub unsafe fn from_cr3() -> Self {
InactivePageTable { p4_frame: Cr3::read().0 }
}
}
impl Drop for InactivePageTable {
@@ -188,4 +110,17 @@ impl Drop for InactivePageTable {
        info!("PageTable dropping: {:?}", self);
        dealloc_frame(self.p4_frame.clone());
    }
}

fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut PageTable)) {
    // Create a temporary page
    let page = Page::of_addr(0xcafebabe);
    assert!(active_table.translate_page(page).is_none(), "temporary page is already mapped");
    // Map it to table
    active_table.map_to_(page, frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
    // Call f
    let table = unsafe { &mut *page.start_address().as_mut_ptr() };
    f(active_table, table);
    // Unmap the page
    active_table.unmap(page).unwrap().1.flush();
}

@@ -1,97 +0,0 @@
use core::marker::PhantomData;
use core::ops::{Index, IndexMut};
use memory::alloc_frame;
use super::entry::*;
use super::ENTRY_COUNT;

pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;

pub struct Table<L: TableLevel> {
    entries: [Entry; ENTRY_COUNT],
    level: PhantomData<L>,
}

impl<L> Table<L> where L: TableLevel {
    pub fn zero(&mut self) {
        for entry in self.entries.iter_mut() {
            entry.set_unused();
        }
    }
}

impl<L> Table<L> where L: HierarchicalLevel {
    fn next_table_address(&self, index: usize) -> Option<usize> {
        let entry_flags = self[index].flags();
        if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.contains(EntryFlags::HUGE_PAGE) {
            let table_address = self as *const _ as usize;
            Some((table_address << 9) | (index << 12))
        } else {
            None
        }
    }

    pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &*(address as *const _) })
    }

    pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
        self.next_table_address(index)
            .map(|address| unsafe { &mut *(address as *mut _) })
    }

    pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel> {
        if self.next_table(index).is_none() {
            assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
                    "mapping code does not support huge pages");
            let frame = alloc_frame();
            // TODO: Remove USER_ACCESSIBLE
            self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE);
            self.next_table_mut(index).unwrap().zero();
        }
        self.next_table_mut(index).unwrap()
    }
}

impl<L> Index<usize> for Table<L> where L: TableLevel {
    type Output = Entry;

    fn index(&self, index: usize) -> &Entry {
        &self.entries[index]
    }
}

impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
    fn index_mut(&mut self, index: usize) -> &mut Entry {
        &mut self.entries[index]
    }
}

pub trait TableLevel {}

pub enum Level4 {}
pub enum Level3 {}
pub enum Level2 {}
pub enum Level1 {}

impl TableLevel for Level4 {}
impl TableLevel for Level3 {}
impl TableLevel for Level2 {}
impl TableLevel for Level1 {}

pub trait HierarchicalLevel: TableLevel {
    type NextLevel: TableLevel;
}

impl HierarchicalLevel for Level4 {
    type NextLevel = Level3;
}

impl HierarchicalLevel for Level3 {
    type NextLevel = Level2;
}

impl HierarchicalLevel for Level2 {
    type NextLevel = Level1;
}
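next_table_address above is the recursive-mapping trick in code form: shifting a table's own virtual address left by 9 bits and OR-ing in the entry index times 0x1000 lands on the child table's virtual address. A worked example starting from the P4 pointer defined above; the values are illustrative:

fn recursive_walk_example() {
    let p4_addr: usize = 0xffffffff_fffff000;    // P4, reached through the recursive entry
    let p3_addr = (p4_addr << 9) | (1 << 12);    // P3 table behind P4 entry 1
    assert_eq!(p3_addr, 0xffffffff_ffe01000);
    let p2_addr = (p3_addr << 9) | (2 << 12);    // P2 table behind that P3's entry 2
    assert_eq!(p2_addr, 0xffffffff_c0202000);
}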

@@ -1,34 +0,0 @@
use super::*;
use super::table::{Level1, Table};

pub struct TemporaryPage {
    page: Page,
}

impl TemporaryPage {
    pub fn new() -> TemporaryPage {
        TemporaryPage { page: Page::of_addr(0xcafebabe) }
    }

    /// Maps the temporary page to the given frame in the active table.
    /// Returns the start address of the temporary page.
    pub fn map(&self, frame: Frame, active_table: &mut ActivePageTable) -> VirtAddr {
        use super::entry::EntryFlags;
        assert!(active_table.translate_page(self.page).is_none(),
                "temporary page is already mapped");
        active_table.map_to(self.page, frame, EntryFlags::WRITABLE);
        self.page.start_address()
    }

    /// Unmaps the temporary page in the active table.
    pub fn unmap(&self, active_table: &mut ActivePageTable) -> Frame {
        active_table.unmap(self.page)
    }

    /// Maps the temporary page to the given page table frame in the active
    /// table. Returns a reference to the now mapped table.
    pub fn map_table_frame(&self, frame: Frame, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
        unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
    }
}

@@ -168,6 +168,6 @@ mod test {
    pub fn cow() {
        use arch;
        arch::paging::test_cow();
        // arch::paging::test_cow();
    }
}

@@ -1,27 +0,0 @@
use super::address::PhysAddr;

pub const PAGE_SIZE: usize = 4096;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Frame {
    pub(super) number: usize,
}

impl Frame {
    pub fn of_addr(address: usize) -> Frame {
        Frame { number: address / PAGE_SIZE }
    }
    //TODO: Set private
    pub fn start_address(&self) -> PhysAddr {
        PhysAddr::new((self.number * PAGE_SIZE) as u64)
    }
    pub fn clone(&self) -> Frame {
        Frame { number: self.number }
    }
}

pub trait FrameAllocator {
    fn allocate_frame(&mut self) -> Option<Frame>;
    fn deallocate_frame(&mut self, frame: Frame);
}

@@ -62,24 +62,26 @@ impl MemoryArea {
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
fn map(&self, pt: &mut Mapper) {
fn map(&self, pt: &mut ActivePageTable) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let frame = Frame::of_addr(phys_start.get() + page.start_address() - self.start_addr);
pt.map_to(page, frame, self.flags.0);
let frame = Frame::of_addr(phys_start.get() + page.start_address().as_u64() as usize - self.start_addr);
pt.map_to_(page, frame, self.flags.0);
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
pt.map(page, self.flags.0);
let frame = alloc_frame();
pt.map_to_(page, frame, self.flags.0);
}
}
}
}
fn unmap(&self, pt: &mut Mapper) {
fn unmap(&self, pt: &mut ActivePageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let frame = pt.unmap(page);
let (frame, flush) = pt.unmap(page).unwrap();
flush.flush();
if self.phys_start_addr.is_none() {
dealloc_frame(frame);
}
@@ -156,18 +158,16 @@ impl MemorySet {
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn with(&self, mut f: impl FnMut()) {
use core::{ptr, mem};
let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
let mut active_table = active_table();
let backup = active_table.switch(page_table);
pub fn with(&self, f: impl FnOnce()) {
let current = unsafe { InactivePageTable::from_cr3() };
self.page_table.switch();
f();
mem::forget(active_table.switch(backup));
current.switch();
use core::mem;
mem::forget(current);
}
pub fn switch(&self) {
use core::{ptr, mem};
let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
mem::forget(active_table().switch(page_table));
self.page_table.switch();
}
pub fn set_kstack(&mut self, stack: Stack) {
assert!(self.kstack.is_none());
@@ -228,13 +228,14 @@ fn new_page_table_with_kernel() -> InactivePageTable {
let mut page_table = InactivePageTable::new(frame, &mut active_table);
use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
let e510 = active_table.p4()[KERNEL_PML4].clone();
let e509 = active_table.p4()[KERNEL_HEAP_PML4].clone();
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut PageTable) };
let e510 = table[KERNEL_PML4].clone();
let e509 = table[KERNEL_HEAP_PML4].clone();
active_table.with(&mut page_table, |pt: &mut Mapper| {
pt.p4_mut()[KERNEL_PML4] = e510;
pt.p4_mut()[KERNEL_HEAP_PML4] = e509;
pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::WRITABLE); // LAPIC
active_table.with(&mut page_table, |pt: &mut ActivePageTable| {
table[KERNEL_PML4] = e510;
table[KERNEL_HEAP_PML4] = e509;
pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::PRESENT | EntryFlags::WRITABLE, &mut frame_allocator()).unwrap().flush(); // LAPIC
});
page_table
}
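new_page_table_with_kernel shares the kernel and kernel-heap regions by copying just two top-level entries into every fresh page table. A quick check of why one PML4 entry is enough per region; illustrative arithmetic assuming 4-level paging with 4 KiB pages:

// Each PML4 entry maps 512 * 512 * 512 * 4 KiB = 512 GiB of virtual address space,
// so cloning table[KERNEL_PML4] and table[KERNEL_HEAP_PML4] shares those whole
// regions without copying any lower-level tables.
let span_per_pml4_entry: u64 = 512 * 512 * 512 * 4096;
assert_eq!(span_per_pml4_entry, 512u64 << 30); // 512 GiB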

@@ -1,11 +1,9 @@
pub use arch::paging::*;
use arch::paging;
use bit_allocator::{BitAlloc, BitAlloc64K};
use consts::KERNEL_OFFSET;
use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
use multiboot2::BootInformation;
pub use self::address::*;
pub use self::frame::*;
pub use self::memory_set::*;
pub use self::stack_allocator::*;
use spin::{Mutex, MutexGuard};
@@ -14,7 +12,8 @@ use super::HEAP_ALLOCATOR;
mod memory_set;
mod stack_allocator;
mod address;
mod frame;
const PAGE_SIZE: usize = 1 << 12;
lazy_static! {
static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
@@ -22,14 +21,14 @@ lazy_static! {
static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
pub fn alloc_frame() -> Frame {
let frame = FRAME_ALLOCATOR.lock().allocate_frame().expect("no more frame");
let frame = frame_allocator().alloc().expect("no more frame");
trace!("alloc: {:?}", frame);
frame
}
pub fn dealloc_frame(frame: Frame) {
trace!("dealloc: {:?}", frame);
FRAME_ALLOCATOR.lock().deallocate_frame(frame);
frame_allocator().dealloc(frame);
}
fn alloc_stack(size_in_pages: usize) -> Stack {
@@ -41,14 +40,24 @@ fn alloc_stack(size_in_pages: usize) -> Stack {
/// The only way to get active page table
fn active_table() -> MutexGuard<'static, ActivePageTable> {
static ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe { ActivePageTable::new() });
lazy_static! {
static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
ActivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap()
});
}
ACTIVE_TABLE.lock()
}
pub fn frame_allocator() -> BitAllocGuard {
BitAllocGuard(FRAME_ALLOCATOR.lock())
}
// Return true to continue, false to halt
pub fn page_fault_handler(addr: VirtAddr) -> bool {
// Handle copy on write
active_table().try_copy_on_write(addr)
false
// FIXME: enable cow
// active_table().try_copy_on_write(addr)
}
@@ -60,7 +69,6 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
let kernel_memory = remap_the_kernel(boot_info);
use self::paging::Page;
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
@@ -74,12 +82,17 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
kernel_memory
}
impl FrameAllocator for BitAlloc64K {
fn allocate_frame(&mut self) -> Option<Frame> {
self.alloc().map(|x| Frame { number: x })
pub struct BitAllocGuard(MutexGuard<'static, BitAlloc64K>);
impl FrameAllocator<Size4KiB> for BitAllocGuard {
fn alloc(&mut self) -> Option<Frame> {
self.0.alloc().map(|x| Frame::of_addr(x * PAGE_SIZE))
}
fn deallocate_frame(&mut self, frame: Frame) {
self.dealloc(frame.number);
}
impl FrameDeallocator<Size4KiB> for BitAllocGuard {
fn dealloc(&mut self, frame: Frame) {
self.0.dealloc(frame.start_address().as_u64() as usize / PAGE_SIZE);
}
}
@@ -143,7 +156,7 @@ fn get_init_kstack_and_set_guard_page() -> Stack {
// turn the stack bottom into a guard page
active_table().unmap(stack_bottom_page);
debug!("guard page at {:#x}", stack_bottom_page.start_address());
debug!("guard page at {:?}", stack_bottom_page.start_address());
Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
}

@@ -1,4 +1,4 @@
use arch::paging::{ActivePageTable, EntryFlags, Page, PageRange};
use super::*;
use memory::PAGE_SIZE;
// TODO: use BitAllocator & alloc fixed size stack
@ -40,12 +40,13 @@ impl StackAllocator {
// map stack pages to physical frames
for page in Page::range_inclusive(start, end) {
active_table.map(page, EntryFlags::WRITABLE);
let frame = alloc_frame();
active_table.map_to_(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
}
// create a new stack
let top_of_stack = end.start_address() + PAGE_SIZE;
Some(Stack::new(top_of_stack, start.start_address()))
Some(Stack::new(top_of_stack.as_u64() as usize, start.start_address().as_u64() as usize))
}
_ => None, /* not enough pages */
}
