diff --git a/src/arch/x86_64/paging/entry.rs b/src/arch/x86_64/paging/entry.rs
deleted file mode 100644
index e89d33c..0000000
--- a/src/arch/x86_64/paging/entry.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-use memory::Frame;
-
-#[derive(Copy, Clone)]
-pub struct Entry(u64);
-
-impl Entry {
- pub fn is_unused(&self) -> bool {
- self.0 == 0
- }
-
- pub fn set_unused(&mut self) {
- self.0 = 0;
- }
-
- pub fn flags(&self) -> EntryFlags {
- EntryFlags::from_bits_truncate(self.0)
- }
-
- pub fn pointed_frame(&self) -> Option<Frame> {
- if self.flags().contains(EntryFlags::PRESENT) {
- Some(Frame::of_addr(
- self.0 as usize & 0x000fffff_fffff000
- ))
- } else {
- None
- }
- }
-
- pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
- assert_eq!(frame.start_address().as_u64() & !0x000fffff_fffff000, 0);
- self.0 = (frame.start_address().as_u64()) | flags.bits();
- }
-}
-
-bitflags! {
- pub struct EntryFlags: u64 {
- const PRESENT = 1 << 0;
- const WRITABLE = 1 << 1;
- const USER_ACCESSIBLE = 1 << 2;
- const WRITE_THROUGH = 1 << 3;
- const NO_CACHE = 1 << 4;
- const ACCESSED = 1 << 5;
- const DIRTY = 1 << 6;
- const HUGE_PAGE = 1 << 7;
- const GLOBAL = 1 << 8;
- const NO_EXECUTE = 1 << 63;
- // Types at bit 9-11
- const SHARED = 1 << 9;
- const COW = 2 << 9;
- }
-}
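
With entry.rs gone, `Entry`/`EntryFlags` are replaced by the `x86_64` crate's page-table types (`EntryFlags` becomes an alias for `PageTableFlags` in mod.rs below); the software SHARED/COW bits in 9..11 have no named counterpart there, and the cow module is commented out in this change. A minimal sketch of how the removed helpers translate, assuming the crate exposes `flags()`, `addr()` and `set_frame()` on page-table entries (those accessors are not shown in this diff):

```rust
use x86_64::structures::paging::{PageTable, PageTableFlags, PhysFrame};

/// Rough equivalent of the removed Entry::pointed_frame, written against the
/// crate's PageTable (sketch only; accessor names assumed from the crate).
fn pointed_frame(table: &PageTable, index: usize) -> Option<PhysFrame> {
    let entry = &table[index];
    if entry.flags().contains(PageTableFlags::PRESENT) {
        // addr() masks off the flag bits, like the old `& 0x000fffff_fffff000`
        Some(PhysFrame::containing_address(entry.addr()))
    } else {
        None
    }
}

/// Rough equivalent of the removed Entry::set.
fn set(table: &mut PageTable, index: usize, frame: PhysFrame, flags: PageTableFlags) {
    table[index].set_frame(frame, flags);
}
```
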
diff --git a/src/arch/x86_64/paging/mapper.rs b/src/arch/x86_64/paging/mapper.rs
deleted file mode 100644
index ac1c23c..0000000
--- a/src/arch/x86_64/paging/mapper.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-use core::ptr::Unique;
-use memory::*;
-use super::{ENTRY_COUNT, EntryFlags, Page};
-use super::table::{self, Level1, Level4, Table};
-
-pub struct Mapper {
- p4: Unique<Table<Level4>>,
-}
-
-impl Mapper {
- pub const unsafe fn new() -> Mapper {
- Mapper {
- p4: Unique::new_unchecked(table::P4),
- }
- }
-
- pub fn p4(&self) -> &Table<Level4> {
- unsafe { self.p4.as_ref() }
- }
-
- pub fn p4_mut(&mut self) -> &mut Table<Level4> {
- unsafe { self.p4.as_mut() }
- }
-
- pub fn translate(&self, virtual_address: VirtAddr) -> Option<PhysAddr> {
- let offset = virtual_address % PAGE_SIZE;
- self.translate_page(Page::of_addr(virtual_address))
- .map(|frame| PhysAddr::new((frame.start_address().get() + offset) as u64))
- }
-
- pub fn translate_page(&self, page: Page) -> Option<Frame> {
- let p3 = self.p4().next_table(page.p4_index());
-
- let huge_page = || {
- p3.and_then(|p3| {
- let p3_entry = &p3[page.p3_index()];
- // 1GiB page?
- if let Some(start_frame) = p3_entry.pointed_frame() {
- if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
- // address must be 1GiB aligned
- assert_eq!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE), 0);
- return Some(Frame::of_addr(
- start_frame.start_address().get() +
- (page.p2_index() * ENTRY_COUNT + page.p1_index()) * PAGE_SIZE
- ));
- }
- }
- if let Some(p2) = p3.next_table(page.p3_index()) {
- let p2_entry = &p2[page.p2_index()];
- // 2MiB page?
- if let Some(start_frame) = p2_entry.pointed_frame() {
- if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
- // address must be 2MiB aligned
- assert_eq!(start_frame.start_address().get() % ENTRY_COUNT, 0);
- return Some(Frame::of_addr(
- start_frame.start_address().get() + page.p1_index() * PAGE_SIZE
- ));
- }
- }
- }
- None
- })
- };
-
- p3.and_then(|p3| p3.next_table(page.p3_index()))
- .and_then(|p2| p2.next_table(page.p2_index()))
- .and_then(|p1| p1[page.p1_index()].pointed_frame())
- .or_else(huge_page)
- }
-
- pub(super) fn entry_mut(&mut self, page: Page) -> &mut Entry {
- use core::ops::IndexMut;
- let p4 = self.p4_mut();
- let p3 = p4.next_table_create(page.p4_index());
- let p2 = p3.next_table_create(page.p3_index());
- let p1 = p2.next_table_create(page.p2_index());
- p1.index_mut(page.p1_index())
- }
-
- pub fn map_to(&mut self, page: Page, frame: Frame, flags: EntryFlags) {
- let entry = self.entry_mut(page);
- assert!(entry.is_unused());
- entry.set(frame, flags | EntryFlags::PRESENT);
- }
-
- pub fn map(&mut self, page: Page, flags: EntryFlags)
- {
- self.map_to(page, alloc_frame(), flags)
- }
-
- pub fn identity_map(&mut self, frame: Frame, flags: EntryFlags)
- {
- let page = Page::of_addr(frame.start_address().to_identity_virtual());
- self.map_to(page, frame, flags)
- }
-
- pub fn unmap(&mut self, page: Page) -> Frame
- {
- use x86_64::instructions::tlb;
- use x86_64::VirtAddr;
-
- assert!(self.translate(page.start_address()).is_some());
-
- let p1 = self.p4_mut()
- .next_table_mut(page.p4_index())
- .and_then(|p3| p3.next_table_mut(page.p3_index()))
- .and_then(|p2| p2.next_table_mut(page.p2_index()))
- .expect("mapping code does not support huge pages");
- let frame = p1[page.p1_index()].pointed_frame().unwrap();
- p1[page.p1_index()].set_unused();
- tlb::flush(VirtAddr::new(page.start_address() as u64));
- // TODO free p(1,2,3) table if empty
- frame
- }
-}
\ No newline at end of file
diff --git a/src/arch/x86_64/paging/mod.rs b/src/arch/x86_64/paging/mod.rs
index d92d719..eb9b65d 100644
--- a/src/arch/x86_64/paging/mod.rs
+++ b/src/arch/x86_64/paging/mod.rs
@@ -1,163 +1,80 @@
-use core::ops::{Add, Deref, DerefMut};
use memory::*;
-pub use self::cow::*;
-pub use self::entry::*;
-pub use self::mapper::Mapper;
-pub use self::temporary_page::TemporaryPage;
+//pub use self::cow::*;
+use x86_64::structures::paging::*;
+use x86_64::registers::control::{Cr3, Cr3Flags};
+use x86_64::instructions::tlb;
+use x86_64::ux::u9;
-mod entry;
-mod table;
-mod temporary_page;
-mod mapper;
-mod cow;
+pub type Frame = PhysFrame;
+pub type EntryFlags = PageTableFlags;
+pub type ActivePageTable = RecursivePageTable<'static>;
-const ENTRY_COUNT: usize = 512;
-
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
-pub struct Page {
- number: usize,
-}
+pub use x86_64::structures::paging::{Page, PageRange, Mapper, FrameAllocator, FrameDeallocator, Size4KiB, PageTable};
-impl Page {
- pub fn of_addr(address: VirtAddr) -> Page {
- assert!(address < 0x0000_8000_0000_0000 ||
- address >= 0xffff_8000_0000_0000,
- "invalid address: 0x{:x}", address);
- Page { number: address / PAGE_SIZE }
- }
-
- pub fn start_address(&self) -> usize {
- self.number * PAGE_SIZE
- }
-
- fn p4_index(&self) -> usize {
- (self.number >> 27) & 0o777
- }
- fn p3_index(&self) -> usize {
- (self.number >> 18) & 0o777
- }
- fn p2_index(&self) -> usize {
- (self.number >> 9) & 0o777
- }
- fn p1_index(&self) -> usize {
- (self.number >> 0) & 0o777
- }
+//mod cow;
- pub fn range_inclusive(start: Page, end: Page) -> PageRange {
- PageRange {
- start,
- end,
- }
- }
+const ENTRY_COUNT: usize = 512;
- /// Iterate pages of address [begin, end)
- pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
- PageRange {
- start: Page::of_addr(begin),
- end: Page::of_addr(end - 1),
- }
- }
+pub trait PageExt {
+ fn of_addr(address: VirtAddr) -> Self;
+ fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange;
}
-impl Add<usize> for Page {
- type Output = Page;
-
- fn add(self, rhs: usize) -> Page {
- Page { number: self.number + rhs }
+impl PageExt for Page {
+ fn of_addr(address: usize) -> Self {
+ use x86_64;
+ Page::containing_address(x86_64::VirtAddr::new(address as u64))
}
-}
-
-
-#[derive(Clone)]
-pub struct PageRange {
- start: Page,
- end: Page,
-}
-
-impl Iterator for PageRange {
- type Item = Page;
-
- fn next(&mut self) -> Option<Page> {
- if self.start <= self.end {
- let page = self.start;
- self.start.number += 1;
- Some(page)
- } else {
- None
- }
+ fn range_of(begin: usize, end: usize) -> PageRange {
+ Page::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
-pub struct ActivePageTable {
- mapper: Mapper,
+pub trait FrameExt {
+ fn of_addr(address: usize) -> Self;
}
-impl Deref for ActivePageTable {
- type Target = Mapper;
-
- fn deref(&self) -> &Mapper {
- &self.mapper
+impl FrameExt for Frame {
+ fn of_addr(address: usize) -> Self {
+ Frame::containing_address(PhysAddr::new(address as u64))
}
}
-impl DerefMut for ActivePageTable {
- fn deref_mut(&mut self) -> &mut Mapper {
- &mut self.mapper
- }
+pub trait ActiveTableExt {
+ fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable));
+ fn map_to_(&mut self, page: Page, frame: Frame, flags: EntryFlags);
}
-impl ActivePageTable {
- pub const unsafe fn new() -> ActivePageTable {
- ActivePageTable {
- mapper: Mapper::new(),
- }
- }
-
- pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut Mapper))
- {
- use x86_64::instructions::tlb;
- use x86_64::registers::control;
-
- let temporary_page = TemporaryPage::new();
- {
- let backup = Frame::of_addr(control::Cr3::read().0.start_address().get());
-
- // map temporary_page to current p4 table
- let p4_table = temporary_page.map_table_frame(backup.clone(), self);
+impl ActiveTableExt for ActivePageTable {
+ fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
+ with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut PageTable| {
+ let backup = p4_table[0o777].clone();
// overwrite recursive mapping
- self.p4_mut()[511].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ p4_table[0o777].set_frame(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
tlb::flush_all();
// execute f in the new context
- f(self);
+ f(active_table);
// restore recursive mapping to original p4 table
- p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ p4_table[0o777] = backup;
tlb::flush_all();
+ });
+ }
+ fn map_to_(&mut self, page: Page, frame: PhysFrame, flags: EntryFlags) {
+ self.map_to(page, frame, flags, &mut frame_allocator()).unwrap().flush();
+
+ // Set user bit for p1-p4 entry
+ // It's a workaround since x86_64 PageTable do not set user bit.
+ if flags.contains(EntryFlags::USER_ACCESSIBLE) {
+ let mut addr = page.start_address().as_u64();
+ for _ in 0..4 {
+ addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
+ // set USER_ACCESSIBLE
+ unsafe { (*(addr as *mut EntryFlags)).insert(EntryFlags::USER_ACCESSIBLE) };
+ }
}
-
- temporary_page.unmap(self);
- }
-
- pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
- use x86_64::structures::paging::PhysFrame;
- use x86_64::registers::control::{Cr3, Cr3Flags};
- debug!("switch table {:?} -> {:?}", Frame::of_addr(Cr3::read().0.start_address().get()), new_table.p4_frame);
- if new_table.p4_frame.start_address() == Cr3::read().0.start_address() {
- return new_table;
- }
-
- let old_table = InactivePageTable {
- p4_frame: Frame::of_addr(Cr3::read().0.start_address().get()),
- };
- unsafe {
- Cr3::write(PhysFrame::containing_address(new_table.p4_frame.start_address()),
- Cr3Flags::empty());
- }
- use core::mem::forget;
- forget(new_table);
- old_table
}
}
@@ -168,19 +85,24 @@ pub struct InactivePageTable {
impl InactivePageTable {
pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
- let temporary_page = TemporaryPage::new();
- {
- let table = temporary_page.map_table_frame(frame.clone(),
- active_table);
- // now we are able to zero the table
+ with_temporary_map(active_table, &frame, |_, table: &mut PageTable| {
table.zero();
// set up recursive mapping for the table
- table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
- }
- temporary_page.unmap(active_table);
-
+ table[511].set_frame(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ });
InactivePageTable { p4_frame: frame }
}
+ pub fn switch(&self) {
+ let old_frame = Cr3::read().0;
+ let new_frame = self.p4_frame.clone();
+ debug!("switch table {:?} -> {:?}", old_frame, new_frame);
+ if old_frame != new_frame {
+ unsafe { Cr3::write(new_frame, Cr3Flags::empty()); }
+ }
+ }
+ pub unsafe fn from_cr3() -> Self {
+ InactivePageTable { p4_frame: Cr3::read().0 }
+ }
}
impl Drop for InactivePageTable {
@@ -188,4 +110,17 @@ impl Drop for InactivePageTable {
info!("PageTable dropping: {:?}", self);
dealloc_frame(self.p4_frame.clone());
}
+}
+
+fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut PageTable)) {
+ // Create a temporary page
+ let page = Page::of_addr(0xcafebabe);
+ assert!(active_table.translate_page(page).is_none(), "temporary page is already mapped");
+ // Map it to table
+ active_table.map_to_(page, frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+ // Call f
+ let table = unsafe { &mut *page.start_address().as_mut_ptr() };
+ f(active_table, table);
+ // Unmap the page
+ active_table.unmap(page).unwrap().1.flush();
}
\ No newline at end of file
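
For context, a usage sketch of the extension traits added above; `build_address_space` is a hypothetical caller, the mapped address is arbitrary, and `alloc_frame()` is the kernel's own helper from src/memory that this diff only references:

```rust
// Sketch only: create a fresh address space, map one page into it while the
// old table stays active, then switch to it.
fn build_address_space(active_table: &mut ActivePageTable) -> InactivePageTable {
    let mut new_table = InactivePageTable::new(alloc_frame(), active_table);

    // `with` temporarily points the recursive slot (entry 511) at the new P4,
    // so mappings made inside the closure land in `new_table`.
    active_table.with(&mut new_table, |pt: &mut ActivePageTable| {
        let page = Page::of_addr(0x1000_0000); // arbitrary address
        pt.map_to_(page, alloc_frame(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
    });

    // Load the new P4 into CR3. The caller must keep the returned value alive:
    // dropping an InactivePageTable deallocates its P4 frame.
    new_table.switch();
    new_table
}
```
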
diff --git a/src/arch/x86_64/paging/table.rs b/src/arch/x86_64/paging/table.rs
deleted file mode 100644
index 97ce369..0000000
--- a/src/arch/x86_64/paging/table.rs
+++ /dev/null
@@ -1,97 +0,0 @@
-use core::marker::PhantomData;
-use core::ops::{Index, IndexMut};
-use memory::alloc_frame;
-use super::entry::*;
-use super::ENTRY_COUNT;
-
-pub const P4: *mut Table<Level4> = 0xffffffff_fffff000 as *mut _;
-
-pub struct Table<L: TableLevel> {
- entries: [Entry; ENTRY_COUNT],
- level: PhantomData<L>,
-}
-
-impl<L> Table<L> where L: TableLevel {
- pub fn zero(&mut self) {
- for entry in self.entries.iter_mut() {
- entry.set_unused();
- }
- }
-}
-
-impl<L> Table<L> where L: HierarchicalLevel {
- fn next_table_address(&self, index: usize) -> Option<usize> {
- let entry_flags = self[index].flags();
- if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.contains(EntryFlags::HUGE_PAGE) {
- let table_address = self as *const _ as usize;
- Some((table_address << 9) | (index << 12))
- } else {
- None
- }
- }
-
- pub fn next_table(&self, index: usize) -> Option<&Table<L::NextLevel>> {
- self.next_table_address(index)
- .map(|address| unsafe { &*(address as *const _) })
- }
-
- pub fn next_table_mut(&mut self, index: usize) -> Option<&mut Table<L::NextLevel>> {
- self.next_table_address(index)
- .map(|address| unsafe { &mut *(address as *mut _) })
- }
-
- pub fn next_table_create(&mut self, index: usize) -> &mut Table<L::NextLevel>
- {
- if self.next_table(index).is_none() {
- assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
- "mapping code does not support huge pages");
- let frame = alloc_frame();
- // TODO: Remove USER_ACCESSIBLE
- self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::USER_ACCESSIBLE);
- self.next_table_mut(index).unwrap().zero();
- }
- self.next_table_mut(index).unwrap()
- }
-}
-
-impl<L> Index<usize> for Table<L> where L: TableLevel {
- type Output = Entry;
-
- fn index(&self, index: usize) -> &Entry {
- &self.entries[index]
- }
-}
-
-impl<L> IndexMut<usize> for Table<L> where L: TableLevel {
- fn index_mut(&mut self, index: usize) -> &mut Entry {
- &mut self.entries[index]
- }
-}
-
-pub trait TableLevel {}
-
-pub enum Level4 {}
-pub enum Level3 {}
-pub enum Level2 {}
-pub enum Level1 {}
-
-impl TableLevel for Level4 {}
-impl TableLevel for Level3 {}
-impl TableLevel for Level2 {}
-impl TableLevel for Level1 {}
-
-pub trait HierarchicalLevel: TableLevel {
- type NextLevel: TableLevel;
-}
-
-impl HierarchicalLevel for Level4 {
- type NextLevel = Level3;
-}
-
-impl HierarchicalLevel for Level3 {
- type NextLevel = Level2;
-}
-
-impl HierarchicalLevel for Level2 {
- type NextLevel = Level1;
-}
\ No newline at end of file
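
The recursive-mapping arithmetic above (`(table_address << 9) | (index << 12)`) is now hidden inside the crate's `RecursivePageTable`. A sketch of obtaining the active mapper from the same recursive slot; `RecursivePageTable::new` is the crate's constructor (its exact error type varies by version), and the address is the same constant the deleted `P4` pointer used:

```rust
use x86_64::structures::paging::{PageTable, RecursivePageTable};

/// Sketch: build the crate-provided mapper through P4 entry 511, which the
/// deleted code hard-coded as `P4`.
/// Safety: the caller must guarantee the recursive mapping is in place.
unsafe fn active_page_table() -> RecursivePageTable<'static> {
    let p4 = &mut *(0xffffffff_fffff000 as *mut PageTable);
    RecursivePageTable::new(p4).expect("P4 entry 511 does not point back at the P4 table")
}
```
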
diff --git a/src/arch/x86_64/paging/temporary_page.rs b/src/arch/x86_64/paging/temporary_page.rs
deleted file mode 100644
index 136452d..0000000
--- a/src/arch/x86_64/paging/temporary_page.rs
+++ /dev/null
@@ -1,34 +0,0 @@
-use super::*;
-use super::table::{Level1, Table};
-
-pub struct TemporaryPage {
- page: Page,
-}
-
-impl TemporaryPage {
- pub fn new() -> TemporaryPage {
- TemporaryPage { page: Page::of_addr(0xcafebabe) }
- }
-
- /// Maps the temporary page to the given frame in the active table.
- /// Returns the start address of the temporary page.
- pub fn map(&self, frame: Frame, active_table: &mut ActivePageTable) -> VirtAddr {
- use super::entry::EntryFlags;
-
- assert!(active_table.translate_page(self.page).is_none(),
- "temporary page is already mapped");
- active_table.map_to(self.page, frame, EntryFlags::WRITABLE);
- self.page.start_address()
- }
-
- /// Unmaps the temporary page in the active table.
- pub fn unmap(&self, active_table: &mut ActivePageTable) -> Frame {
- active_table.unmap(self.page)
- }
-
- /// Maps the temporary page to the given page table frame in the active
- /// table. Returns a reference to the now mapped table.
- pub fn map_table_frame(&self, frame: Frame, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
- unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
- }
-}
diff --git a/src/lib.rs b/src/lib.rs
index bacf020..15c5be2 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -168,6 +168,6 @@ mod test {
pub fn cow() {
use arch;
- arch::paging::test_cow();
+// arch::paging::test_cow();
}
}
\ No newline at end of file
diff --git a/src/memory/frame.rs b/src/memory/frame.rs
deleted file mode 100644
index bb3daf7..0000000
--- a/src/memory/frame.rs
+++ /dev/null
@@ -1,27 +0,0 @@
-use super::address::PhysAddr;
-
-pub const PAGE_SIZE: usize = 4096;
-
-#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct Frame {
- pub(super) number: usize,
-}
-
-impl Frame {
- pub fn of_addr(address: usize) -> Frame {
- Frame{ number: address / PAGE_SIZE }
- }
- //TODO: Set private
- pub fn start_address(&self) -> PhysAddr {
- PhysAddr::new((self.number * PAGE_SIZE) as u64)
- }
-
- pub fn clone(&self) -> Frame {
- Frame { number: self.number }
- }
-}
-
-pub trait FrameAllocator {
- fn allocate_frame(&mut self) -> Option<Frame>;
- fn deallocate_frame(&mut self, frame: Frame);
-}
\ No newline at end of file
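
With frame.rs gone, `Frame` is only an alias for the crate's `PhysFrame`, and the old constructor survives as `FrameExt::of_addr` (defined in mod.rs above). A trivial sketch of the unchanged call shape; the function name is illustrative and the module's usual imports are assumed:

```rust
fn frame_example() {
    // Same call shape as the removed constructor, now via the FrameExt trait;
    // start_address() comes from the crate's PhysFrame.
    let lapic = Frame::of_addr(0xfee00000);
    assert_eq!(lapic.start_address(), PhysAddr::new(0xfee00000));
}
```
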
diff --git a/src/memory/memory_set.rs b/src/memory/memory_set.rs
index 07cda1f..bffd4e9 100644
--- a/src/memory/memory_set.rs
+++ b/src/memory/memory_set.rs
@@ -62,24 +62,26 @@ impl MemoryArea {
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
- fn map(&self, pt: &mut Mapper) {
+ fn map(&self, pt: &mut ActivePageTable) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
- let frame = Frame::of_addr(phys_start.get() + page.start_address() - self.start_addr);
- pt.map_to(page, frame, self.flags.0);
+ let frame = Frame::of_addr(phys_start.get() + page.start_address().as_u64() as usize - self.start_addr);
+ pt.map_to_(page, frame, self.flags.0);
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
- pt.map(page, self.flags.0);
+ let frame = alloc_frame();
+ pt.map_to_(page, frame, self.flags.0);
}
}
}
}
- fn unmap(&self, pt: &mut Mapper) {
+ fn unmap(&self, pt: &mut ActivePageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
- let frame = pt.unmap(page);
+ let (frame, flush) = pt.unmap(page).unwrap();
+ flush.flush();
if self.phys_start_addr.is_none() {
dealloc_frame(frame);
}
@@ -156,18 +158,16 @@ impl MemorySet {
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
- pub fn with(&self, mut f: impl FnMut()) {
- use core::{ptr, mem};
- let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
- let mut active_table = active_table();
- let backup = active_table.switch(page_table);
+ pub fn with(&self, f: impl FnOnce()) {
+ let current = unsafe { InactivePageTable::from_cr3() };
+ self.page_table.switch();
f();
- mem::forget(active_table.switch(backup));
+ current.switch();
+ use core::mem;
+ mem::forget(current);
}
pub fn switch(&self) {
- use core::{ptr, mem};
- let page_table = unsafe { ptr::read(&self.page_table as *const InactivePageTable) };
- mem::forget(active_table().switch(page_table));
+ self.page_table.switch();
}
pub fn set_kstack(&mut self, stack: Stack) {
assert!(self.kstack.is_none());
@@ -228,13 +228,14 @@ fn new_page_table_with_kernel() -> InactivePageTable {
let mut page_table = InactivePageTable::new(frame, &mut active_table);
use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
- let e510 = active_table.p4()[KERNEL_PML4].clone();
- let e509 = active_table.p4()[KERNEL_HEAP_PML4].clone();
+ let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut PageTable) };
+ let e510 = table[KERNEL_PML4].clone();
+ let e509 = table[KERNEL_HEAP_PML4].clone();
- active_table.with(&mut page_table, |pt: &mut Mapper| {
- pt.p4_mut()[KERNEL_PML4] = e510;
- pt.p4_mut()[KERNEL_HEAP_PML4] = e509;
- pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::WRITABLE); // LAPIC
+ active_table.with(&mut page_table, |pt: &mut ActivePageTable| {
+ table[KERNEL_PML4] = e510;
+ table[KERNEL_HEAP_PML4] = e509;
+ pt.identity_map(Frame::of_addr(0xfee00000), EntryFlags::PRESENT | EntryFlags::WRITABLE, &mut frame_allocator()).unwrap().flush(); // LAPIC
});
page_table
}
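
A hedged usage sketch of the reworked `MemorySet::with` above: it snapshots CR3 via `InactivePageTable::from_cr3`, activates the set's table, runs the closure, restores the old table, and then `forget`s the snapshot so its `Drop` impl does not free the live P4 frame. The helper below is hypothetical and assumes `vaddr` is actually mapped in the target set:

```rust
/// Sketch: read one byte through another address space; page-faults if the
/// address is not mapped readable there.
fn read_byte_in(ms: &MemorySet, vaddr: usize) -> u8 {
    let mut byte = 0u8;
    ms.with(|| {
        // inside the closure the MemorySet's page table is active
        byte = unsafe { *(vaddr as *const u8) };
    });
    byte
}
```
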
diff --git a/src/memory/mod.rs b/src/memory/mod.rs
index d28a022..5f09fed 100644
--- a/src/memory/mod.rs
+++ b/src/memory/mod.rs
@@ -1,11 +1,9 @@
pub use arch::paging::*;
-use arch::paging;
use bit_allocator::{BitAlloc, BitAlloc64K};
use consts::KERNEL_OFFSET;
use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
use multiboot2::BootInformation;
pub use self::address::*;
-pub use self::frame::*;
pub use self::memory_set::*;
pub use self::stack_allocator::*;
use spin::{Mutex, MutexGuard};
@@ -14,7 +12,8 @@ use super::HEAP_ALLOCATOR;
mod memory_set;
mod stack_allocator;
mod address;
-mod frame;
+
+const PAGE_SIZE: usize = 1 << 12;
lazy_static! {
static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
@@ -22,14 +21,14 @@ lazy_static! {
static STACK_ALLOCATOR: Mutex