move common codes to InactivePageTable trait

master
WangRunji 6 years ago
parent 102866bcf9
commit ff806d4465

@ -9,50 +9,6 @@ use self::handler::MemoryHandler;
pub mod handler;
/// an inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
/// Why is this trait in this file? (it seems it should be in paging/mod.rs)
pub trait InactivePageTable {
/// the active version of page table
type Active: PageTable;
/*
** @brief create an inactive page table with kernel memory mapped
** @retval InactivePageTable the created inactive page table
*/
fn new() -> Self;
/*
** @brief create an inactive page table without kernel memory mapped
** @retval InactivePageTable the created inactive page table
*/
fn new_bare() -> Self;
/*
** @brief temporarily activate the page table and edit it
** @retval impl FnOnce(&mut Self::Active)
** the function of the editing action,
** which takes a temporarily activated page table as param
** @retval none
*/
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/*
** @brief activate the inactive page table
** @retval none
*/
unsafe fn activate(&self);
/*
** @brief execute function with this inactive page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T;
/*
** @brief get the token of the inactive page table
** @retval usize the token of the inactive page table
*/
fn token(&self) -> usize;
}
/// a continuous memory space when the same attribute
/// like `vma_struct` in ucore
#[derive(Debug, Clone)]

@ -0,0 +1,15 @@
//! Helper functions
use super::*;
pub trait PageTableExt: PageTable {
const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
fn with_temporary_map<T, D>(&mut self, target: PhysAddr, f: impl FnOnce(&mut Self, &mut D) -> T) -> T {
self.map(Self::TEMP_PAGE_ADDR, target);
let data = unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
let ret = f(self, data);
self.unmap(Self::TEMP_PAGE_ADDR);
ret
}
}

@ -57,14 +57,16 @@ impl Entry for MockEntry {
fn set_user(&mut self, value: bool) { unimplemented!() }
fn execute(&self) -> bool { unimplemented!() }
fn set_execute(&mut self, value: bool) { unimplemented!() }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
impl PageTable for MockPageTable {
type Entry = MockEntry;
// type Entry = MockEntry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry {
let entry = &mut self.entries[addr / PAGE_SIZE];
assert!(!entry.present);
entry.present = true;
@ -77,7 +79,7 @@ impl PageTable for MockPageTable {
assert!(entry.present);
entry.present = false;
}
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry> {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry> {
Some(&mut self.entries[addr / PAGE_SIZE])
}
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {

@ -3,198 +3,149 @@
//! Implemented for every architecture, used by OS.
use super::*;
use super::memory_set::InactivePageTable;
use log::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
pub use self::ext::*;
#[cfg(test)]
mod mock_page_table;
mod ext;
// trait for PageTable
pub trait PageTable {
// type Entry: Entry;
/*
** @brief map a virtual address to the target physical address
** @param addr: VirtAddr the virtual address to map
** @param target: PhysAddr the target physical address
** @retval Entry the page table entry of the mapped virtual address
*/
/// Map a page of virtual address `addr` to the frame of physical address `target`
/// Return the page table entry of the mapped virtual address
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry;
/*
** @brief unmap a virtual address from its physical address
** @param addr: VirtAddr the virtual address to unmap
** @retval none
*/
/// Unmap a page of virtual address `addr`
fn unmap(&mut self, addr: VirtAddr);
/*
** @brief get the page table entry of a virtual address
** @param addr: VirtAddr the virtual address
** @retval Entry the page table entry of the virtual address
*/
/// Get the page table entry of a page of virtual address `addr`
/// If its page does not exist, return `None`
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
// For testing with mock
/*
** @brief used for testing with mock
** get a mutable reference of the content of a page from a virtual address
** @param addr: VirtAddr the virtual address of the page
** @retval &'b mut [u8] mutable reference of the content of a page as array of bytes
*/
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8];
/*
** @brief used for testing with mock
** read data from a virtual address
** @param addr: VirtAddr the virtual address of data to read
** @retval u8 the data read
*/
fn read(&mut self, addr: VirtAddr) -> u8;
/*
** @brief used for testing with mock
** write data to a virtual address
** @param addr: VirtAddr the virtual address of data to write
** @param data: u8 the data to write
** @retval none
*/
fn write(&mut self, addr: VirtAddr, data: u8);
}
/// Get a mutable reference of the content of a page of virtual address `addr`
/// Used for testing with mock
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/// Read data from virtual address `addr`
/// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() }
}
// trait for Entry in PageTable
/// Write data to virtual address `addr`
/// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) {
unsafe { (addr as *mut u8).write(data) }
}
}
/// Page Table Entry
pub trait Entry {
/*
** @brief force update this page table entry
** IMPORTANT!
** This must be called after any change to ensure it becomes effective.
** Usually this will make a flush to TLB/MMU.
** @retval none
*/
/// Make all changes take effect.
///
/// IMPORTANT!
/// This must be called after any change to ensure it becomes effective.
/// Usually it will cause a TLB/MMU flush.
fn update(&mut self);
/*
** @brief get the accessed bit of the entry
** Will be set when accessed
** @retval bool the accessed bit
*/
/// A bit set by hardware when the page is accessed
fn accessed(&self) -> bool;
/*
** @brief get the dirty bit of the entry
** Will be set when written
** @retval bool the dirty bit
*/
/// A bit set by hardware when the page is written
fn dirty(&self) -> bool;
/*
** @brief get the writable bit of the entry
** Will PageFault when try to write page where writable=0
** @retval bool the writable bit
*/
/// Will PageFault when try to write page where writable=0
fn writable(&self) -> bool;
/*
** @brief get the present bit of the entry
** Will PageFault when try to access page where present=0
** @retval bool the present bit
*/
/// Will PageFault when try to access page where present=0
fn present(&self) -> bool;
/*
** @brief clear the accessed bit
** @retval none
*/
fn clear_accessed(&mut self);
/*
** @brief clear the dirty bit
** @retval none
*/
fn clear_dirty(&mut self);
/*
** @brief set value of writable bit
** @param value: bool the writable bit value
** @retval none
*/
fn set_writable(&mut self, value: bool);
/*
** @brief set value of present bit
** @param value: bool the present bit value
** @retval none
*/
fn set_present(&mut self, value: bool);
/*
** @brief get the target physical address in the entry
** can be used for other purposes if present=0
** @retval target: PhysAddr the target physical address
*/
/// The target physical address in the entry
/// Can be used for other purposes if present=0
fn target(&self) -> PhysAddr;
/*
** @brief set the target physical address in the entry
** @param target: PhysAddr the target physical address
** @retval none
*/
fn set_target(&mut self, target: PhysAddr);
// For Copy-on-write extension
/*
** @brief used for Copy-on-write extension
** get the writable and shared bit
** @retval value: bool the writable and shared bit
*/
// For Copy-on-write
fn writable_shared(&self) -> bool;
/*
** @brief used for Copy-on-write extension
** get the readonly and shared bit
** @retval value: bool the readonly and shared bit
*/
fn readonly_shared(&self) -> bool;
/*
** @brief used for Copy-on-write extension
** mark the page as (writable or readonly) shared
** @param writable: bool if it is true, set the page as writable and shared
** else set the page as readonly and shared
** @retval value: none
*/
fn set_shared(&mut self, writable: bool);
/*
** @brief used for Copy-on-write extension
** mark the page as not shared
** @retval value: none
*/
fn clear_shared(&mut self);
// For Swap extension
/*
** @brief used for Swap extension
** get the swapped bit
** @retval value: bool the swapped bit
*/
// For Swap
fn swapped(&self) -> bool;
/*
** @brief used for Swap extension
** set the swapped bit
** @param value: bool the swapped bit value
** @retval none
*/
fn set_swapped(&mut self, value: bool);
/*
** @brief get the user bit of the entry
** @retval bool the user bit
*/
fn user(&self) -> bool;
/*
** @brief set value of user bit
** @param value: bool the user bit value
** @retval none
*/
fn set_user(&mut self, value: bool);
/*
** @brief get the execute bit of the entry
** @retval bool the execute bit
*/
fn execute(&self) -> bool;
/*
** @brief set value of execute bit
** @param value: bool the execute bit value
** @retval none
*/
fn set_execute(&mut self, value: bool);
fn mmio(&self) -> bool;
fn set_mmio(&mut self, value: bool);
}
/// An inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
/// the active version of page table
type Active: PageTable;
/// Create a new page table with kernel memory mapped
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/// Create a new page table without kernel memory mapped
fn new_bare() -> Self;
/// Map kernel segments
fn map_kernel(&mut self);
/// CR3 on x86, SATP on RISCV, TTBR on AArch64
fn token(&self) -> usize;
unsafe fn set_token(token: usize);
fn active_token() -> usize;
fn flush_tlb();
/// Make this page table editable
/// Set the recursive entry of current active page table to this
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/// Activate this page table
unsafe fn activate(&self) {
let old_token = Self::active_token();
let new_token = self.token();
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
}
}
/// Execute function `f` with this page table activated
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_token = Self::active_token();
let new_token = self.token();
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
}
let ret = f();
debug!("switch table {:x?} -> {:x?}", new_token, old_token);
if old_token != new_token {
Self::set_token(old_token);
Self::flush_tlb();
}
ret
}
}

@ -8,7 +8,6 @@
use super::*;
use super::paging::*;
use super::memory_set::InactivePageTable;
use super::addr::Frame;
use core::ops::{Deref, DerefMut};

@ -66,21 +66,10 @@ impl PageTable for ActivePageTable {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
const ROOT_PAGE_TABLE: *mut Aarch64PageTable =
((RECURSIVE_INDEX << 39) | (RECURSIVE_INDEX << 30) | (RECURSIVE_INDEX << 21) | (RECURSIVE_INDEX << 12)) as *mut Aarch64PageTable;
@ -88,19 +77,6 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(ROOT_PAGE_TABLE as *mut _)).unwrap())
}
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
impl Entry for PageEntry {
@ -195,9 +171,9 @@ impl InactivePageTable for InactivePageTable0 {
}
fn new_bare() -> Self {
let frame = alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value());
@ -205,8 +181,35 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
unsafe fn set_token(token: usize) {
ttbr_el1_write(1, Frame::containing_address(PhysAddr::new(token as u64)));
}
fn active_token() -> usize {
ttbr_el1_read(1).start_address().as_u64() as usize
}
fn flush_tlb() {
tlb_invalidate_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
let target = ttbr_el1_read(0).start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
@ -222,51 +225,9 @@ impl InactivePageTable for InactivePageTable0 {
ret
})
}
unsafe fn activate(&self) {
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
// Just need to switch the user TTBR
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
let ret = f();
debug!("switch TTBR1 {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
flush_icache_all();
}
ret
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
}
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
/// Activate as kernel page table (TTBR0).
/// Used in `arch::memory::remap_the_kernel()`.
pub unsafe fn activate_as_kernel(&self) {

@ -31,7 +31,6 @@ pub fn setup_page_table(frame: Frame) {
p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 2, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
use riscv::register::satp;
unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
sfence_vma_all();
info!("setup init page table end");
@ -94,43 +93,10 @@ impl PageTable for ActivePageTable {
let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22);
unsafe { Some(&mut *(entry_addr as *mut PageEntry)) }
}
/*
* @param:
* addr: the input (virtual) address
* @brief:
* get the addr's memory page slice
* @retval:
* a mutable reference slice of 'addr' 's page
*/
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/*
* @param:
* addr: virtual address
* @brief:
* get the address's content
* @retval:
* the content(u8) of 'addr'
*/
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
/*
* @param:
* addr: virtual address
* @brief:
* write the address's content
*/
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
// define the ROOT_PAGE_TABLE, and the virtual address of it?
const ROOT_PAGE_TABLE: *mut RvPageTable =
(((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
@ -139,27 +105,6 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap())
}
/*
* @param:
* frame: the target physical frame which will be temporarily mapped
* f: the function you would like to apply for once
* @brief:
* do something on the target physical frame?
*/
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_usize(), frame.start_address().as_u32() as usize);
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
@ -213,114 +158,16 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
/*
* @brief:
* get a new page table (for a new process or thread)
* @retval:
* the new page table
*/
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/*
* @brief:
* allocate a new frame, set up self-mapping on it, and treat it as the inactive page table
* retval:
* the inactive page table
*/
fn new_bare() -> Self {
let frame = alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target as u32));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { p2_frame: frame }
}
/*
* @param:
* f: a function to do something with the temporary modified activate page table
* @brief:
* temporarily map the inactive page table as an active p2 page and apply f on the temporarily modified active page table
*/
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
ret
})
}
/*
* @brief:
* activate self as the current active page table
*/
unsafe fn activate(&self) {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
}
/*
* @param:
* f: the function to run when temporarily activate self as current page table
* @brief:
* Temporarily activate self and run the process, and return the return value of f
* @retval:
* the return value of f
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
let target = f();
debug!("switch table {:x?} -> {:x?}", new_frame, old_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, old_frame);
sfence_vma_all();
}
target
}
/*
* @brief:
* get the token of self; the token is the starting physical address of self's page table frame
* @retval:
* self token
*/
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
}
}
impl InactivePageTable0 {
/*
* @brief:
* map the kernel code memory address (p2 page table) in the new inactive page table according to the current active page table
*/
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
@ -338,6 +185,42 @@ impl InactivePageTable0 {
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
}
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
}
unsafe fn set_token(token: usize) {
asm!("csrw 0x180, $0" :: "r"(token) :: "volatile");
}
fn active_token() -> usize {
satp::read().bits()
}
fn flush_tlb() {
sfence_vma_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_u32() as usize;
active_table().with_temporary_map(target, |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
ret
})
}
}
impl Drop for InactivePageTable0 {

@ -62,38 +62,14 @@ impl PageTable for ActivePageTable {
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn with_temporary_map<T>(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable) -> T) -> T {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
let ret = f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ret
}
}
impl Entry for PageEntry {
@ -164,16 +140,10 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
fn new_bare() -> Self {
let frame = alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut x86PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
@ -181,8 +151,37 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
}
unsafe fn set_token(token: usize) {
Cr3::write(Frame::containing_address(PhysAddr::new(token as u64)), Cr3Flags::empty());
}
fn active_token() -> usize {
Cr3::read().0.start_address().as_u64() as usize
}
fn flush_tlb() {
tlb::flush_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
active_table().with_temporary_map(&Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let target = Cr3::read().0.start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
@ -198,48 +197,6 @@ impl InactivePageTable for InactivePageTable0 {
ret
})
}
unsafe fn activate(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
let ret = f();
debug!("switch table {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
Cr3::write(old_frame, Cr3Flags::empty());
}
ret
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
}
}
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
}
}
impl Drop for InactivePageTable0 {

@ -4,7 +4,7 @@ use crate::consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, InactivePageTable, handler::*};
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, handler::*};
use ucore_memory::swap::*;
use crate::process::{process};
use crate::sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};

Loading…
Cancel
Save