Fix PageTable::get_entry -> Option.

Branch: toolchain_update
Author: WangRunji, 6 years ago
parent a42d6086c6
commit 438e290b6d

@ -48,33 +48,34 @@ impl<T: PageTable> CowExt<T> {
}
}
pub fn unmap_shared(&mut self, addr: VirtAddr) {
{
let entry = self.page_table.get_entry(addr);
let frame = entry.target() / PAGE_SIZE;
if entry.readonly_shared() {
self.rc_map.read_decrease(&frame);
} else if entry.writable_shared() {
self.rc_map.write_decrease(&frame);
}
let entry = self.page_table.get_entry(addr)
.expect("entry not exist");
let frame = entry.target() / PAGE_SIZE;
if entry.readonly_shared() {
self.rc_map.read_decrease(&frame);
} else if entry.writable_shared() {
self.rc_map.write_decrease(&frame);
}
self.page_table.unmap(addr);
}
/// This function must be called whenever PageFault happens.
/// Return whether copy-on-write happens.
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
{
let entry = self.page_table.get_entry(addr);
if !entry.readonly_shared() && !entry.writable_shared() {
return false;
}
let frame = entry.target() / PAGE_SIZE;
if self.rc_map.read_count(&frame) == 0 && self.rc_map.write_count(&frame) == 1 {
entry.clear_shared();
entry.set_writable(true);
entry.update();
self.rc_map.write_decrease(&frame);
return true;
}
let entry = self.page_table.get_entry(addr);
if entry.is_none() {
return false;
}
let entry = entry.unwrap();
if !entry.readonly_shared() && !entry.writable_shared() {
return false;
}
let frame = entry.target() / PAGE_SIZE;
if self.rc_map.read_count(&frame) == 0 && self.rc_map.write_count(&frame) == 1 {
entry.clear_shared();
entry.set_writable(true);
entry.update();
self.rc_map.write_decrease(&frame);
return true;
}
use core::mem::uninitialized;
let mut temp_data: [u8; PAGE_SIZE] = unsafe { uninitialized() };
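Note (editorial sketch): the fast path above reuses the frame in place only when the reference-count map shows no remaining readers and exactly one writer; every other combination falls through to the copy path that stages the page through temp_data. A self-contained, host-side toy of that rule, with illustrative names that are not the crate's actual types:

use std::collections::HashMap;

struct RcMap { read: HashMap<usize, usize>, write: HashMap<usize, usize> }

impl RcMap {
    fn read_count(&self, frame: &usize) -> usize { *self.read.get(frame).unwrap_or(&0) }
    fn write_count(&self, frame: &usize) -> usize { *self.write.get(frame).unwrap_or(&0) }
    // true: other references remain, so the handler must copy to a fresh frame;
    // false: this is the last (write) reference and the frame can be made writable in place.
    fn must_copy(&self, frame: usize) -> bool {
        !(self.read_count(&frame) == 0 && self.write_count(&frame) == 1)
    }
}

fn main() {
    let mut rc = RcMap { read: HashMap::new(), write: HashMap::new() };
    rc.write.insert(0x1000 / 4096, 1);
    assert!(!rc.must_copy(0x1000 / 4096));  // sole writer: just flip the writable bit
    rc.read.insert(0x1000 / 4096, 1);
    assert!(rc.must_copy(0x1000 / 4096));   // a read-shared mapping remains: copy first
}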
@ -186,7 +187,7 @@ pub mod test {
pt.write(0x1000, 2);
assert_eq!(pt.rc_map.read_count(&frame), 1);
assert_eq!(pt.rc_map.write_count(&frame), 1);
assert_ne!(pt.get_entry(0x1000).target(), target);
assert_ne!(pt.get_entry(0x1000).unwrap().target(), target);
assert_eq!(pt.read(0x1000), 2);
assert_eq!(pt.read(0x2000), 1);
assert_eq!(pt.read(0x3000), 1);
@ -199,7 +200,7 @@ pub mod test {
pt.write(0x2000, 3);
assert_eq!(pt.rc_map.read_count(&frame), 0);
assert_eq!(pt.rc_map.write_count(&frame), 0);
assert_eq!(pt.get_entry(0x2000).target(), target,
assert_eq!(pt.get_entry(0x2000).unwrap().target(), target,
"The last write reference should not allocate new frame.");
assert_eq!(pt.read(0x1000), 2);
assert_eq!(pt.read(0x2000), 3);

@ -1,5 +1,6 @@
#![no_std]
#![feature(alloc)]
#![feature(nll)]
extern crate alloc;

@ -85,7 +85,7 @@ impl MemoryArea {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
if self.phys_start_addr.is_none() {
let target = pt.get_entry(addr).target();
let target = pt.get_entry(addr).unwrap().target();
T::dealloc_frame(target);
}
pt.unmap(addr);

@ -70,8 +70,8 @@ impl PageTable for MockPageTable {
assert!(entry.present);
entry.present = false;
}
fn get_entry(&mut self, addr: VirtAddr) -> &mut <Self as PageTable>::Entry {
&mut self.entries[addr / PAGE_SIZE]
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry> {
Some(&mut self.entries[addr / PAGE_SIZE])
}
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {
self._read(addr);
@ -156,34 +156,35 @@ mod test {
fn entry() {
let mut pt = MockPageTable::new();
pt.map(0x0, 0x1000);
{
let entry = pt.get_entry(0);
assert!(entry.present());
assert!(entry.writable());
assert!(!entry.accessed());
assert!(!entry.dirty());
assert_eq!(entry.target(), 0x1000);
}
let entry = pt.get_entry(0).unwrap();
assert!(entry.present());
assert!(entry.writable());
assert!(!entry.accessed());
assert!(!entry.dirty());
assert_eq!(entry.target(), 0x1000);
pt.read(0x0);
assert!(pt.get_entry(0).accessed());
assert!(!pt.get_entry(0).dirty());
let entry = pt.get_entry(0).unwrap();
assert!(entry.accessed());
assert!(!entry.dirty());
pt.get_entry(0).clear_accessed();
assert!(!pt.get_entry(0).accessed());
entry.clear_accessed();
assert!(!entry.accessed());
pt.write(0x1, 1);
assert!(pt.get_entry(0).accessed());
assert!(pt.get_entry(0).dirty());
let entry = pt.get_entry(0).unwrap();
assert!(entry.accessed());
assert!(entry.dirty());
pt.get_entry(0).clear_dirty();
assert!(!pt.get_entry(0).dirty());
entry.clear_dirty();
assert!(!entry.dirty());
pt.get_entry(0).set_writable(false);
assert!(!pt.get_entry(0).writable());
entry.set_writable(false);
assert!(!entry.writable());
pt.get_entry(0).set_present(false);
assert!(!pt.get_entry(0).present());
entry.set_present(false);
assert!(!entry.present());
}
#[test]

@ -13,7 +13,7 @@ pub trait PageTable {
type Entry: Entry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
fn unmap(&mut self, addr: VirtAddr);
fn get_entry(&mut self, addr: VirtAddr) -> &mut Self::Entry;
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry>;
// For testing with mock
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8];
fn read(&mut self, addr: VirtAddr) -> u8;

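Note (editorial sketch): the trait change above is the core of this commit: `get_entry` now returns `Option<&mut Self::Entry>` instead of a reference that was meaningless for unmapped addresses. A minimal caller-side sketch of the migration; `mark_read_only` is a hypothetical helper, not part of the crate:

fn mark_read_only<T: PageTable>(pt: &mut T, addr: VirtAddr) -> bool {
    // Before this commit the caller got `&mut Entry` back unconditionally;
    // now an unmapped address is an explicit `None` the caller must handle.
    match pt.get_entry(addr) {
        Some(entry) => {
            entry.set_writable(false);
            entry.update();
            true
        }
        None => false, // not mapped: nothing to change
    }
}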
@ -38,7 +38,7 @@ impl SwapManager for EnhancedClockSwapManager {
// The reason may be that `get_page_slice_mut()` contains an unsafe operation,
// which leads the compiler to do a wrong optimization.
// let slice = page_table.get_page_slice_mut(addr);
let entry = page_table.get_entry(addr);
let entry = page_table.get_entry(addr).unwrap();
// println!("{:#x} , {}, {}", addr, entry.accessed(), entry.dirty());
match (entry.accessed(), entry.dirty()) {

@ -68,7 +68,8 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
/// Swap out page of `addr`, return the origin map target.
fn swap_out(&mut self, addr: VirtAddr) -> Result<PhysAddr, SwapError> {
let data = self.page_table.get_page_slice_mut(addr);
let entry = self.page_table.get_entry(addr);
let entry = self.page_table.get_entry(addr)
.ok_or(SwapError::NotMapped)?;
if entry.swapped() {
return Err(SwapError::AlreadySwapped);
}
@ -82,26 +83,25 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
}
/// Map page of `addr` to `target`, then swap in the data.
fn swap_in(&mut self, addr: VirtAddr, target: PhysAddr) -> Result<(), SwapError> {
let token = {
let entry = self.page_table.get_entry(addr);
if !entry.swapped() {
return Err(SwapError::NotSwapped);
}
let token = entry.target() / PAGE_SIZE;
entry.set_target(target);
entry.set_swapped(false);
entry.set_present(true);
entry.update();
token
};
let entry = self.page_table.get_entry(addr)
.ok_or(SwapError::NotMapped)?;
if !entry.swapped() {
return Err(SwapError::NotSwapped);
}
let token = entry.target() / PAGE_SIZE;
entry.set_target(target);
entry.set_swapped(false);
entry.set_present(true);
entry.update();
let data = self.page_table.get_page_slice_mut(addr);
self.swapper.swap_in(token, data).map_err(|_| SwapError::IOError)?;
self.swap_manager.push(addr);
Ok(())
}
pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> Option<PhysAddr>) -> bool {
if !self.page_table.get_entry(addr).swapped() {
return false;
match self.page_table.get_entry(addr) {
None => return false,
Some(entry) => if !entry.swapped() { return false; },
}
// Allocate a frame, if failed, swap out a page
let frame = alloc_frame().unwrap_or_else(|| self.swap_out_any().ok().unwrap());
@ -112,6 +112,7 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
pub enum SwapError {
AlreadySwapped,
NotMapped,
NotSwapped,
NoSwapped,
IOError,

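Note (editorial sketch): with `get_entry` returning an `Option`, the swap code above turns a missing mapping into `SwapError::NotMapped` via `ok_or` and propagates it with `?` instead of panicking inside the page table. A standalone illustration of that pattern, using simplified stand-in types rather than the crate's:

#[derive(Debug, PartialEq)]
enum SwapError { NotMapped, NotSwapped }

struct Entry { swapped: bool }

// Stand-in for `page_table.get_entry(addr)`: None means "not mapped".
fn get_entry(entries: &mut [Option<Entry>], page: usize) -> Option<&mut Entry> {
    entries.get_mut(page)?.as_mut()
}

fn swap_in(entries: &mut [Option<Entry>], page: usize) -> Result<(), SwapError> {
    let entry = get_entry(entries, page).ok_or(SwapError::NotMapped)?;
    if !entry.swapped {
        return Err(SwapError::NotSwapped);
    }
    entry.swapped = false; // the real code reads the data back from the swapper here
    Ok(())
}

fn main() {
    let mut entries = vec![None, Some(Entry { swapped: true })];
    assert_eq!(swap_in(&mut entries, 0), Err(SwapError::NotMapped));
    assert_eq!(swap_in(&mut entries, 1), Ok(()));
}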
kernel/Cargo.lock generated

@ -42,12 +42,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bootloader"
version = "0.3.1"
source = "git+https://github.com/wangrunji0408/bootloader#d53b01de2fb8c0b3793ebf2d8ec1d56180dc71d4"
source = "git+https://github.com/wangrunji0408/bootloader#b03d826d6591f392cd824bbd350e82b5f17f21f3"
dependencies = [
"apic 0.1.0 (git+https://github.com/wangrunji0408/APIC-Rust)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -58,7 +58,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cfg-if"
version = "0.1.5"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -118,7 +118,7 @@ name = "log"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -245,7 +245,7 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -272,7 +272,7 @@ dependencies = [
"ucore-memory 0.1.0",
"ucore-process 0.1.0",
"volatile 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -343,7 +343,7 @@ dependencies = [
[[package]]
name = "x86_64"
version = "0.2.13"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -374,7 +374,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum bootloader 0.3.1 (git+https://github.com/wangrunji0408/bootloader)" = "<none>"
"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16"
"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3"
"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
"checksum fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7c6c16d316ccdac21a4dd648e314e76facbbaf316e83ca137d0857a9c07419d0"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
@ -408,6 +408,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum x86 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "841e1ca5a87068718a2a26f2473c6f93cf3b8119f9778fa0ae4b39b664d9e66a"
"checksum x86_64 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "a7e95a2813e20d24546c2b29ecc6df55cfde30c983df69eeece0b179ca9d68ac"
"checksum x86_64 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2bd647af1614659e1febec1d681231aea4ebda4818bf55a578aff02f3e4db4b4"
"checksum xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58"
"checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

@ -9,7 +9,6 @@ no_bbl = []
[profile.dev]
# MUST >= 1 : Enable RVO to avoid stack overflow
# MUST <= 1 : Avoid double fault at -O2 T_T
opt-level = 1
[profile.release]

@ -2,9 +2,9 @@
// [0x80000000, 0x80800000]
const P2_SIZE: usize = 1 << 22;
const P2_MASK: usize = 0x3ff << 22;
pub const RECURSIVE_PAGE_PML4: usize = 0x3fe;
pub const RECURSIVE_INDEX: usize = 0x3fe;
pub const KERNEL_OFFSET: usize = 0;
pub const KERNEL_PML4: usize = 0x8000_0000 >> 22;
pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;
pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
pub const MEMORY_OFFSET: usize = 0x8000_0000;

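Note (editorial sketch): a quick arithmetic check on the renamed Sv32 constants: the root (P2) table has 1024 entries covering 4 MiB each, so an address's P2 index is its top 10 bits:

const P2_SIZE: usize = 1 << 22;                   // 4 MiB per root entry
const RECURSIVE_INDEX: usize = 0x3fe;             // second-to-last root entry
const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;

fn main() {
    assert_eq!(KERNEL_P2_INDEX, 0x200);           // kernel base 0x8000_0000 -> index 512
    assert_eq!(RECURSIVE_INDEX, 1022);
    assert_eq!(2 * P2_SIZE, 0x0080_0000);         // the two identity-mapped kernel entries span 8 MiB
}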
@ -1,4 +1,4 @@
use consts::{KERNEL_PML4, RECURSIVE_PAGE_PML4};
use consts::{KERNEL_P2_INDEX, RECURSIVE_INDEX};
// Depends on kernel
use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
use super::riscv::addr::*;
@ -14,14 +14,14 @@ use ucore_memory::paging::*;
pub fn setup_page_table(frame: Frame) {
let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut RvPageTable) };
p2.zero();
p2.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
p2.set_recursive(RECURSIVE_INDEX, frame.clone());
// Set kernel identity map
// 0x10000000 ~ 1K area
p2.map_identity(0x40, EF::VALID | EF::READABLE | EF::WRITABLE);
// 0x80000000 ~ 8K area
p2.map_identity(KERNEL_PML4, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_PML4 + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
use super::riscv::register::satp;
unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
@ -42,7 +42,7 @@ impl PageTable for ActivePageTable {
let frame = Frame::of_addr(PhysAddr::new(target as u32));
self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap().flush();
self.get_entry(addr)
self.get_entry(addr).unwrap()
}
fn unmap(&mut self, addr: usize) {
@ -51,11 +51,14 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
if unsafe { !(*ROOT_PAGE_TABLE)[addr >> 22].flags().contains(EF::VALID) } {
return None;
}
let page = Page::of_addr(VirtAddr::new(addr));
let _ = self.0.translate_page(page);
let entry_addr = ((addr >> 10) & 0x003ffffc) | (RECURSIVE_PAGE_PML4 << 22);
unsafe { &mut *(entry_addr as *mut PageEntry) }
let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22);
unsafe { Some(&mut *(entry_addr as *mut PageEntry)) }
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
@ -73,7 +76,7 @@ impl PageTable for ActivePageTable {
}
const ROOT_PAGE_TABLE: *mut RvPageTable =
(((RECURSIVE_PAGE_PML4 << 10) | (RECURSIVE_PAGE_PML4 + 1)) << 12) as *mut RvPageTable;
(((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
impl ActivePageTable {
pub unsafe fn new() -> Self {
@ -153,24 +156,24 @@ impl InactivePageTable for InactivePageTable0 {
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { p2_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_PAGE_PML4].clone();
let backup = p2_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p2_table[RECURSIVE_PAGE_PML4].set(self.p2_frame.clone(), EF::VALID);
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p2_table[RECURSIVE_PAGE_PML4] = backup;
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
});
}
@ -222,12 +225,12 @@ impl InactivePageTable0 {
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_PML4];
let e1 = table[KERNEL_P2_INDEX];
assert!(!e1.is_unused());
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_PML4].set(e1.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
});
}
}

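Note (editorial sketch): the rewritten Sv32 `get_entry` earlier in this file builds the virtual address of the leaf (P1) entry through the recursive slot: the result's vpn1 is RECURSIVE_INDEX, its vpn0 is the original address's vpn1, and its page offset selects the original vpn0's 4-byte PTE. A host-side worked example of that formula (the helper name is illustrative):

const RECURSIVE_INDEX: usize = 0x3fe;

fn p1_entry_va(addr: usize) -> usize {
    ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22)
}

fn main() {
    // KERNEL_HEAP_OFFSET from this commit: vpn1 = 0x200, vpn0 = 0x200.
    let va = p1_entry_va(0x8020_0000);
    assert_eq!(va, 0xffa0_0800);
    assert_eq!(va >> 22, RECURSIVE_INDEX);    // translation walks through the recursive slot
    assert_eq!((va >> 12) & 0x3ff, 0x200);    // then indexes the root with the old vpn1
    assert_eq!(va & 0xfff, 0x200 * 4);        // the offset picks out the old vpn0's PTE
}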
@ -49,7 +49,7 @@ impl PageTable for ActivePageTable {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
.unwrap().flush();
self.get_entry(addr)
unsafe { &mut *(get_entry_ptr(addr, 1)) }
}
fn unmap(&mut self, addr: usize) {
@ -57,9 +57,12 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
unsafe { &mut *(entry_addr as *mut PageEntry) }
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } { return None; }
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
@ -140,6 +143,12 @@ impl Entry for PageEntry {
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
}
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
debug_assert!(level <= 4);
let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1);
entry_addr as *mut PageEntry
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }

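Note (editorial sketch): the new x86_64 `get_entry` checks the level-4 through level-2 entries for presence before handing out the level-1 entry, and `get_entry_ptr` generalizes the old hard-coded level-1 formula to any level: shift the address right by 9 bits per level, align to an 8-byte entry, and OR in the base of the recursively mapped region (recursive index 511, as implied by the old 0xffffff80_00000000 constant). A host-side check of that equivalence:

fn entry_va(addr: usize, level: u8) -> usize {
    ((addr >> (level * 9)) & !0x7) | !((1usize << (48 - level as usize * 9)) - 1)
}

fn main() {
    let addr = 0x1234_5000usize; // an arbitrary low-half address
    // Level 1 reproduces the hard-coded formula this commit removes:
    assert_eq!(entry_va(addr, 1),
               ((addr >> 9) & 0o777_777_777_7770) | 0xffff_ff80_0000_0000);
    // Level 4 lands in the page that holds the PML4 itself:
    assert_eq!(entry_va(addr, 4) & !0xfff, 0xffff_ffff_ffff_f000);
}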
@ -15,8 +15,9 @@ pub fn before_return() {
}
pub fn error(tf: &TrapFrame) -> ! {
error!("{:#x?}", tf);
let pid = processor().pid();
error!("On CPU{} Process {}:\n{:#x?}", cpu::id(), pid, tf);
error!("On CPU{} Process {}", cpu::id(), pid);
processor().manager().exit(pid, 0x100);
processor().yield_now();
