Use PageTable interface in OS

master
WangRunji 7 years ago
parent f500086b9e
commit ade0f0110f

@@ -36,6 +36,7 @@ uart_16550 = "0.1"
 lazy_static = { version = "1.0.0", features = ["spin_no_std"] }
 simple-filesystem = { git = "https://github.com/wangrunji0408/SimpleFileSystem-Rust" }
 bit-allocator = { path = "crate/bit-allocator" }
+ucore-memory = { path = "crate/memory" }

 [build-dependencies]
 cc = "1.0"

@@ -1,5 +1,5 @@
 [package]
-name = "memory"
+name = "ucore-memory"
 version = "0.1.0"
 authors = ["WangRunji <wangrunji0408@163.com>"]

@@ -22,6 +22,7 @@ impl<T: PageTable> CowExt<T> {
         let entry = self.page_table.map(addr, target);
         entry.set_writable(false);
         entry.set_shared(writable);
+        entry.update();
         let frame = target / PAGE_SIZE;
         match writable {
             true => self.rc_map.write_increase(&frame),
@@ -52,6 +53,7 @@ impl<T: PageTable> CowExt<T> {
         if self.rc_map.read_count(&frame) == 0 && self.rc_map.write_count(&frame) == 1 {
             entry.clear_shared();
             entry.set_writable(true);
+            entry.update();
             self.rc_map.write_decrease(&frame);
             return true;
         }
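Note: the `entry.update()` calls added above are what make the permission changes visible to the CPU. `set_writable` and `set_shared` only edit the in-memory entry; until the TLB line is flushed, stale translations may still allow writes. A minimal sketch of the pattern in terms of the generic `Entry` trait introduced below (the function name is illustrative, not part of this commit):

    use ucore_memory::paging::Entry;

    // Mark a mapped page copy-on-write: clear WRITABLE, record the shared
    // state, then flush so the old writable translation dies.
    fn mark_cow(entry: &mut impl Entry, writable: bool) {
        entry.set_writable(false);
        entry.set_shared(writable);
        entry.update(); // without this, the CPU may keep honoring stale permissions
    }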

@@ -22,6 +22,7 @@ pub struct MockEntry {
 }

 impl Entry for MockEntry {
+    fn update(&mut self) {}
     fn accessed(&self) -> bool { self.accessed }
     fn dirty(&self) -> bool { self.dirty }
     fn writable(&self) -> bool { self.writable }
@@ -42,6 +43,11 @@ impl Entry for MockEntry {
         self.writable_shared = false;
         self.readonly_shared = false;
     }
+    fn user(&self) -> bool { unimplemented!() }
+    fn set_user(&mut self, value: bool) { unimplemented!() }
+    fn execute(&self) -> bool { unimplemented!() }
+    fn set_execute(&mut self, value: bool) { unimplemented!() }
 }

 type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
@@ -49,7 +55,6 @@ type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
 impl PageTable for MockPageTable {
     type Entry = MockEntry;

-    /// Map a page, return false if no more space
     fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
         let entry = &mut self.entries[addr / PAGE_SIZE];
         assert!(!entry.present);
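Note: with `update()` stubbed as a no-op and the new user/execute methods left `unimplemented!()`, `MockPageTable` stays usable for the architecture-independent unit tests. A sketch of such a test, assuming the mock is constructed elsewhere with enough entries (its constructor is not part of this diff):

    #[cfg(test)]
    fn exercise_interface(pt: &mut MockPageTable) {
        let entry = pt.map(0x1000, 0x2000); // map one page
        assert!(entry.present());
        entry.set_writable(false);
        entry.update();                     // no-op for the mock
        pt.unmap(0x1000);
    }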

@@ -1,6 +1,12 @@
+//! Generic page table interface
+//!
+//! Implemented for each architecture, used by the OS.
+
 use super::*;

+#[cfg(test)]
 pub use self::mock_page_table::MockPageTable;
+#[cfg(test)]
 mod mock_page_table;

 pub trait PageTable {
@@ -13,6 +19,11 @@ pub trait PageTable {
 }

 pub trait Entry {
+    /// IMPORTANT!
+    /// This must be called after any change to ensure it becomes effective.
+    /// Usually this will flush the TLB/MMU.
+    fn update(&mut self);
+
     /// Will be set when accessed
     fn accessed(&self) -> bool;
     /// Will be set when written

@@ -34,4 +45,9 @@ pub trait Entry {
     fn readonly_shared(&self) -> bool;
     fn set_shared(&mut self, writable: bool);
     fn clear_shared(&mut self);
+
+    fn user(&self) -> bool;
+    fn set_user(&mut self, value: bool);
+    fn execute(&self) -> bool;
+    fn set_execute(&mut self, value: bool);
 }
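Note: this trait is the OS-facing surface of the interface. For example, a page-replacement scan needs only `get_entry` plus the accessed-bit accessors, and must call `update()` after clearing the bit. A hedged sketch of such a consumer (not code from this commit):

    use ucore_memory::paging::{Entry, PageTable};

    // One pass of a second-chance scan: give recently accessed pages
    // another chance, return the first page whose accessed bit is clear.
    fn pick_victim<T: PageTable>(pt: &mut T, pages: &[usize]) -> Option<usize> {
        for &addr in pages {
            let entry = pt.get_entry(addr);
            if entry.accessed() {
                entry.clear_accessed();
                entry.update(); // flush so the hardware can set the bit again
            } else {
                return Some(addr); // victim page
            }
        }
        None
    }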

@@ -1,20 +1,11 @@
 use memory::*;
-//pub use self::cow::*;
-use x86_64::structures::paging::*;
-use x86_64::registers::control::{Cr3, Cr3Flags};
+pub use ucore_memory::paging::{Entry, PageTable};
 use x86_64::instructions::tlb;
+use x86_64::registers::control::{Cr3, Cr3Flags};
+use x86_64::structures::paging::{Mapper, PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
+pub use x86_64::structures::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
 use x86_64::ux::u9;
-
-pub type Frame = PhysFrame;
-pub type EntryFlags = PageTableFlags;
-pub type ActivePageTable = RecursivePageTable<'static>;
-
-pub use x86_64::structures::paging::{Page, PageRange, Mapper, FrameAllocator, FrameDeallocator, Size4KiB, PageTable};
-
-//mod cow;
-
-const ENTRY_COUNT: usize = 512;

 pub trait PageExt {
     fn of_addr(address: VirtAddr) -> Self;
     fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange;
@@ -40,18 +31,53 @@ impl FrameExt for Frame {
     }
 }

-pub trait ActiveTableExt {
-    fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable));
-    fn map_to_(&mut self, page: Page, frame: Frame, flags: EntryFlags);
-}
+pub struct ActivePageTable(RecursivePageTable<'static>);
+
+pub struct PageEntry(PageTableEntry);

-impl ActiveTableExt for ActivePageTable {
-    fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
-        with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut PageTable| {
+impl PageTable for ActivePageTable {
+    type Entry = PageEntry;
+
+    fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
+        let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
+        self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut frame_allocator())
+            .unwrap().flush();
+        self.get_entry(addr)
+    }
+
+    fn unmap(&mut self, addr: usize) {
+        let (frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
+        flush.flush();
+    }
+
+    fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
+        let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
+        unsafe { &mut *(entry_addr as *mut PageEntry) }
+    }
+
+    fn read_page(&mut self, addr: usize, data: &mut [u8]) {
+        use core::slice;
+        let mem = unsafe { slice::from_raw_parts((addr & !0xfffusize) as *const u8, 4096) };
+        data.copy_from_slice(mem);
+    }
+
+    fn write_page(&mut self, addr: usize, data: &[u8]) {
+        use core::slice;
+        let mem = unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, 4096) };
+        mem.copy_from_slice(data);
+    }
+}
+
+impl ActivePageTable {
+    pub unsafe fn new() -> Self {
+        ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
+    }
+    pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut ActivePageTable)) {
+        with_temporary_map(self, &Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
             let backup = p4_table[0o777].clone();
             // overwrite recursive mapping
-            p4_table[0o777].set_frame(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            p4_table[0o777].set_frame(table.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
             tlb::flush_all();
             // execute f in the new context

@@ -62,20 +88,55 @@ impl ActiveTableExt for ActivePageTable {
             tlb::flush_all();
         });
     }
-    fn map_to_(&mut self, page: Page<Size4KiB>, frame: PhysFrame<Size4KiB>, flags: EntryFlags) {
-        self.map_to(page, frame, flags, &mut frame_allocator()).unwrap().flush();
-        // Set user bit for p1-p4 entry
-        // It's a workaround since x86_64 PageTable do not set user bit.
-        if flags.contains(EntryFlags::USER_ACCESSIBLE) {
-            let mut addr = page.start_address().as_u64();
-            for _ in 0..4 {
-                addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
-                // set USER_ACCESSIBLE
-                unsafe { (*(addr as *mut EntryFlags)).insert(EntryFlags::USER_ACCESSIBLE) };
-            }
-        }
-    }
-}
+    pub fn map_to(&mut self, page: Page, frame: Frame) -> &mut PageEntry {
+        self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize)
+    }
+}
+
+impl Entry for PageEntry {
+    fn update(&mut self) {
+        use x86_64::{VirtAddr, instructions::tlb::flush};
+        let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
+        flush(addr);
+    }
+    fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
+    fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
+    fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
+    fn present(&self) -> bool { self.0.flags().contains(EF::PRESENT) }
+    fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
+    fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); }
+    fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); }
+    fn set_present(&mut self, value: bool) { self.as_flags().set(EF::PRESENT, value); }
+    fn target(&self) -> usize { self.0.addr().as_u64() as usize }
+    fn writable_shared(&self) -> bool { self.0.flags().contains(EF::BIT_10) }
+    fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
+    fn set_shared(&mut self, writable: bool) {
+        let flags = self.as_flags();
+        flags.set(EF::BIT_10, writable);
+        flags.set(EF::BIT_9, !writable);
+    }
+    fn clear_shared(&mut self) { self.as_flags().remove(EF::BIT_9 | EF::BIT_10); }
+    fn user(&self) -> bool { self.0.flags().contains(EF::USER_ACCESSIBLE) }
+    fn set_user(&mut self, value: bool) {
+        self.as_flags().set(EF::USER_ACCESSIBLE, value);
+        if value {
+            let mut addr = self as *const _ as usize;
+            for _ in 0..3 {
+                // Upper level entry
+                addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
+                // set USER_ACCESSIBLE
+                unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) };
+            }
+        }
+    }
+    fn execute(&self) -> bool { !self.0.flags().contains(EF::NO_EXECUTE) }
+    fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
+}
+
+impl PageEntry {
+    fn as_flags(&mut self) -> &mut EF {
+        unsafe { &mut *(self as *mut _ as *mut EF) }
+    }
+}

 #[derive(Debug)]
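Note: `get_entry` and `set_user` both rely on the recursive mapping installed in PML4 slot 511 (0o777): shifting an address right by 9 bits, aligning it to 8 bytes, and ORing in the recursive prefix yields the virtual address of the page-table entry that maps it. A worked sketch of the arithmetic, with the constants copied from the diff:

    // With PML4[511] pointing at the PML4 itself, the PTE mapping `addr` is
    // itself mapped at this address. Applying the transform again climbs one
    // level (PTE -> PDE -> PDPTE -> PML4E), which is why set_user loops 3 times.
    fn entry_addr(addr: usize) -> usize {
        ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000
    }
    // e.g. entry_addr(0) == 0xffffff80_00000000, the first PTE in the
    // recursively mapped region.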
@@ -85,13 +146,23 @@ pub struct InactivePageTable {

 impl InactivePageTable {
     pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
-        with_temporary_map(active_table, &frame, |_, table: &mut PageTable| {
+        with_temporary_map(active_table, &frame, |_, table: &mut x86PageTable| {
             table.zero();
             // set up recursive mapping for the table
-            table[511].set_frame(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
         });
         InactivePageTable { p4_frame: frame }
     }
+
+    pub fn map_kernel(&mut self, active_table: &mut ActivePageTable) {
+        let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
+        let e510 = table[510].clone();
+        let e509 = table[509].clone();
+        active_table.with(self, |pt: &mut ActivePageTable| {
+            table[510] = e510;
+            table[509] = e509;
+        });
+    }
+
     pub fn switch(&self) {
         let old_frame = Cr3::read().0;
         let new_frame = self.p4_frame.clone();
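Note: `map_kernel` copies PML4 entries 510 and 509 (the kernel and kernel-heap slots, per the constants deleted from memory_set.rs below) into the new table. The subtlety is that the same raw pointer refers to a different table in each phase, because `with` redirects the recursive slot. A sketch of the aliasing this relies on, with the address taken from the diff:

    // The recursive slot makes this fixed address always resolve to whichever
    // PML4 is reachable through slot 511 of the *current* address mapping.
    fn p4() -> &'static mut x86PageTable {
        unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) }
    }
    // Outside `with`: p4() aliases the active (kernel) PML4, so e510/e509 are
    // read from it. Inside `with`: the recursive slot points at the new frame,
    // so writing table[510]/table[509] copies the kernel mappings into the
    // new address space.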
@@ -112,15 +183,15 @@ impl Drop for InactivePageTable {
     }
 }

-fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut PageTable)) {
+fn with_temporary_map(active_table: &mut ActivePageTable, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
     // Create a temporary page
     let page = Page::of_addr(0xcafebabe);
-    assert!(active_table.translate_page(page).is_none(), "temporary page is already mapped");
+    assert!(active_table.0.translate_page(page).is_none(), "temporary page is already mapped");
     // Map it to table
-    active_table.map_to_(page, frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+    active_table.map_to(page, frame.clone());
     // Call f
     let table = unsafe { &mut *page.start_address().as_mut_ptr() };
     f(active_table, table);
     // Unmap the page
-    active_table.unmap(page).unwrap().1.flush();
+    active_table.unmap(0xcafebabe);
 }

@@ -36,6 +36,7 @@ extern crate simple_filesystem;
 extern crate spin;
 extern crate syscall as redox_syscall;
 extern crate uart_16550;
+extern crate ucore_memory;
 extern crate volatile;
 #[macro_use]
 extern crate x86_64;

@@ -67,54 +67,61 @@ impl MemoryArea {
             Some(phys_start) => {
                 for page in Page::range_of(self.start_addr, self.end_addr) {
                     let frame = Frame::of_addr(phys_start.get() + page.start_address().as_u64() as usize - self.start_addr);
-                    pt.map_to_(page, frame, self.flags.0);
+                    self.flags.apply(pt.map_to(page, frame));
                 }
             }
             None => {
                 for page in Page::range_of(self.start_addr, self.end_addr) {
                     let frame = alloc_frame();
-                    pt.map_to_(page, frame, self.flags.0);
+                    self.flags.apply(pt.map_to(page, frame));
                 }
             }
         }
     }
     fn unmap(&self, pt: &mut ActivePageTable) {
         for page in Page::range_of(self.start_addr, self.end_addr) {
-            let (frame, flush) = pt.unmap(page).unwrap();
-            flush.flush();
+            let addr = page.start_address().as_u64() as usize;
             if self.phys_start_addr.is_none() {
+                let frame = Frame::of_addr(pt.get_entry(addr).target());
                 dealloc_frame(frame);
             }
+            pt.unmap(addr);
         }
     }
 }

-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub struct MemoryAttr(EntryFlags);
-
-impl Default for MemoryAttr {
-    fn default() -> Self {
-        MemoryAttr(EntryFlags::PRESENT | EntryFlags::NO_EXECUTE | EntryFlags::WRITABLE)
-    }
-}
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
+pub struct MemoryAttr {
+    user: bool,
+    readonly: bool,
+    execute: bool,
+    hide: bool,
+}

 impl MemoryAttr {
     pub fn user(mut self) -> Self {
-        self.0 |= EntryFlags::USER_ACCESSIBLE;
+        self.user = true;
         self
     }
     pub fn readonly(mut self) -> Self {
-        self.0.remove(EntryFlags::WRITABLE);
+        self.readonly = true;
         self
     }
     pub fn execute(mut self) -> Self {
-        self.0.remove(EntryFlags::NO_EXECUTE);
+        self.execute = true;
        self
     }
     pub fn hide(mut self) -> Self {
-        self.0.remove(EntryFlags::PRESENT);
+        self.hide = true;
         self
     }
+    fn apply(&self, entry: &mut impl Entry) {
+        if self.user { entry.set_user(true); }
+        if self.readonly { entry.set_writable(false); }
+        if self.execute { entry.set_execute(true); }
+        if self.hide { entry.set_present(false); }
+        if self.user || self.readonly || self.execute || self.hide { entry.update(); }
+    }
 }

 /// A set of memory spaces, consisting of several contiguous segments
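Note: `MemoryAttr` is now a set of architecture-neutral flags that are translated into entry bits only when `apply` runs, ending with a single `update()` flush. A short usage sketch, mirroring how `MemoryArea::map` uses it (the function name is illustrative, not part of this commit):

    use ucore_memory::paging::Entry;

    // Describe a user-mode, read-only, executable segment, then stamp a
    // freshly mapped entry with it.
    fn map_user_text_page(entry: &mut impl Entry) {
        let attr = MemoryAttr::default().user().readonly().execute();
        attr.apply(entry); // sets each requested bit, then calls update() once
    }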
@@ -226,15 +233,6 @@ fn new_page_table_with_kernel() -> InactivePageTable {
     let frame = alloc_frame();
     let mut active_table = active_table();
     let mut page_table = InactivePageTable::new(frame, &mut active_table);
-
-    use consts::{KERNEL_HEAP_PML4, KERNEL_PML4};
-    let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut PageTable) };
-    let e510 = table[KERNEL_PML4].clone();
-    let e509 = table[KERNEL_HEAP_PML4].clone();
-    active_table.with(&mut page_table, |pt: &mut ActivePageTable| {
-        table[KERNEL_PML4] = e510;
-        table[KERNEL_HEAP_PML4] = e509;
-    });
+    page_table.map_kernel(&mut active_table);
     page_table
 }

@@ -8,6 +8,7 @@ pub use self::memory_set::*;
 pub use self::stack_allocator::*;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
+use ucore_memory::paging::PageTable;

 mod memory_set;
 mod stack_allocator;
@@ -42,7 +43,7 @@ fn alloc_stack(size_in_pages: usize) -> Stack {
 fn active_table() -> MutexGuard<'static, ActivePageTable> {
     lazy_static! {
         static ref ACTIVE_TABLE: Mutex<ActivePageTable> = Mutex::new(unsafe {
-            ActivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap()
+            ActivePageTable::new()
         });
     }
     ACTIVE_TABLE.lock()
@@ -153,11 +154,10 @@ fn get_init_kstack_and_set_guard_page() -> Stack {
     extern { fn stack_bottom(); }
     let stack_bottom = PhysAddr::new(stack_bottom as u64).to_kernel_virtual();
-    let stack_bottom_page = Page::of_addr(stack_bottom);

     // turn the stack bottom into a guard page
-    active_table().unmap(stack_bottom_page);
-    debug!("guard page at {:?}", stack_bottom_page.start_address());
+    active_table().unmap(stack_bottom);
+    debug!("guard page at {:?}", stack_bottom);

     Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
 }

@@ -1,5 +1,5 @@
-use super::*;
 use memory::PAGE_SIZE;
+use super::*;

 // TODO: use BitAllocator & alloc fixed size stack
 pub struct StackAllocator {

@@ -41,7 +41,7 @@ impl StackAllocator {
         // map stack pages to physical frames
         for page in Page::range_inclusive(start, end) {
             let frame = alloc_frame();
-            active_table.map_to_(page, frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            active_table.map_to(page, frame);
         }
         // create a new stack
