aarch64/mmu: refactor PageTableFlags

branch master · equation314 committed 6 years ago · commit f9e47b2fd8 (parent de6354ddd3)

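In summary: the aarch64 page-table flags lose their borrowed x86 names (PRESENT, ACCESSED, HUGE_PAGE, PAGE_BIT, ...) in favor of the VMSAv8-64 descriptor terminology (VALID, AF, TABLE_OR_PAGE, ...); a descriptor is split into three disjoint field groups (output address, memory attributes, remaining flags), each with its own mask; and the whole-entry setters `set_addr`/`set_flags` give way to `modify_addr`/`modify_flags`/`modify_attr`, which rewrite only their own group. A `Default` flag set replaces the hand-assembled flag lists in the kernel.
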
@@ -1,12 +1,27 @@
 use core::fmt;
 use core::ops::{Index, IndexMut};
-use super::{PageSize, PhysFrame, Size4KiB};
+use super::PhysFrame;
 use addr::PhysAddr;
 use usize_conversions::usize_from;
 use ux::*;
+use register::FieldValue;
+use register::cpu::RegisterReadWrite;
+
+/// Memory attribute fields mask
+const MEMORY_ATTR_MASK: u64 = (MEMORY_ATTRIBUTE::SH.mask << MEMORY_ATTRIBUTE::SH.shift)
+    | (MEMORY_ATTRIBUTE::AttrIndx.mask << MEMORY_ATTRIBUTE::AttrIndx.shift);
+/// Output address mask
+const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
+/// Other flags mask
+const FLAGS_MASK: u64 = !(MEMORY_ATTR_MASK | ADDR_MASK);
+
+/// Memory attribute fields
+type PageTableAttributeFieldValue = FieldValue<u64, MEMORY_ATTRIBUTE::Register>;
+pub struct PageTableAttribute(PageTableAttributeFieldValue);
+
 /// The error returned by the `PageTableEntry::frame` method.
 #[derive(Debug, Clone, Copy, PartialEq)]
 pub enum FrameError {
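The three masks partition the 64-bit descriptor into memory-attribute fields, the output address, and everything else. A quick standalone check of the values (a sketch; in the `register` crate, a field's `.mask` is its unshifted bit mask, so `SH.mask == 0b11` and `AttrIndx.mask == 0b111`):

```rust
// SH is 2 bits at offset 8 and AttrIndx is 3 bits at offset 2 (see the
// register_bitfields! block added later in this diff).
const MEMORY_ATTR_MASK: u64 = (0b11 << 8) | (0b111 << 2); // == 0x31c
const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;             // bits 12..48
const FLAGS_MASK: u64 = !(MEMORY_ATTR_MASK | ADDR_MASK);  // everything else

fn main() {
    assert_eq!(MEMORY_ATTR_MASK, 0x31c);
    // The groups are disjoint and together cover all 64 bits.
    assert_eq!(MEMORY_ATTR_MASK & ADDR_MASK, 0);
    assert_eq!(MEMORY_ATTR_MASK | ADDR_MASK | FLAGS_MASK, !0);
}
```
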
@@ -24,6 +39,18 @@ pub struct PageTableEntry {
     pub entry: u64,
 }
 
+impl RegisterReadWrite<u64, MEMORY_ATTRIBUTE::Register> for PageTableEntry {
+    #[inline]
+    fn get(&self) -> u64 {
+        self.entry
+    }
+
+    #[inline]
+    fn set(&self, value: u64) {
+        unsafe { *(&self.entry as *const u64 as *mut u64) = value }
+    }
+}
+
 impl PageTableEntry {
     /// Returns whether this entry is zero.
     pub fn is_unused(&self) -> bool {
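A note on the `set` implementation above: the `register` crate's `RegisterReadWrite` trait takes `&self`, which is why the impl casts `&self.entry` from `*const u64` to `*mut u64` before writing. In plain Rust, a write through a shared reference is only well-defined when it goes through `UnsafeCell`; a minimal sketch of that formulation (the `Entry` type here is hypothetical, not the code in this commit):

```rust
use core::cell::UnsafeCell;

// Hypothetical entry type whose value may be written through `&self`.
// UnsafeCell is what makes the aliased write defined behavior.
struct Entry(UnsafeCell<u64>);

impl Entry {
    fn get(&self) -> u64 {
        unsafe { *self.0.get() }
    }

    fn set(&self, value: u64) {
        unsafe { *self.0.get() = value }
    }
}
```
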
@@ -42,7 +69,16 @@ impl PageTableEntry {
     /// Returns the physical address mapped by this entry, might be zero.
     pub fn addr(&self) -> PhysAddr {
-        PhysAddr::new(self.entry & 0x0000_ffff_ffff_f000)
+        PhysAddr::new(self.entry & ADDR_MASK)
     }
 
+    /// Returns the memory attribute fields of this entry.
+    pub fn attr(&self) -> PageTableAttribute {
+        PageTableAttribute(PageTableAttributeFieldValue::new(
+            MEMORY_ATTR_MASK,
+            0,
+            self.entry & MEMORY_ATTR_MASK,
+        ))
+    }
+
     /// Returns the physical frame mapped by this entry.
@@ -53,30 +89,36 @@ impl PageTableEntry {
-    /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
-    ///    `addr` function must be used)
+    /// - `FrameError::HugeFrame` if the entry does not have the `TABLE_OR_PAGE` flag set, i.e. it
+    ///    maps a block/huge page (the `addr` function must be used instead)
     pub fn frame(&self) -> Result<PhysFrame, FrameError> {
-        if !self.flags().contains(PageTableFlags::PRESENT) {
+        if !self.flags().contains(PageTableFlags::VALID) {
             Err(FrameError::FrameNotPresent)
-        } else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
+        } else if !self.flags().contains(PageTableFlags::TABLE_OR_PAGE) {
+            // is a huge page (block)
             Err(FrameError::HugeFrame)
         } else {
             Ok(PhysFrame::containing_address(self.addr()))
         }
     }
 
+    /// Map the entry to the specified physical frame with the specified flags.
+    pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
+        // is not a huge page (block)
+        assert!(flags.contains(PageTableFlags::TABLE_OR_PAGE));
+        self.set(frame.start_address().as_u64() | flags.bits());
+    }
+
-    /// Map the entry to the specified physical address with the specified flags.
-    pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
-        assert!(addr.is_aligned(Size4KiB::SIZE));
-        self.entry = (addr.as_u64()) | flags.bits();
+    /// Sets the physical address of this entry.
+    pub fn modify_addr(&mut self, addr: PhysAddr) {
+        self.entry = (self.entry & !ADDR_MASK) | addr.as_u64();
     }
 
-    /// Map the entry to the specified physical frame with the specified flags.
-    pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
-        assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
-        self.set_addr(frame.start_address(), flags)
+    /// Sets the flags of this entry.
+    pub fn modify_flags(&mut self, flags: PageTableFlags) {
+        self.entry = (self.entry & !FLAGS_MASK) | flags.bits();
     }
 
-    /// Sets the flags of this entry.
-    pub fn set_flags(&mut self, flags: PageTableFlags) {
-        self.entry = self.addr().as_u64() | flags.bits();
+    /// Sets the memory attribute fields of this entry.
+    pub fn modify_attr(&mut self, attr: PageTableAttribute) {
+        self.entry = (self.entry & !MEMORY_ATTR_MASK) | attr.0.value;
     }
 }
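The point of the new `modify_*` methods is that each one performs a read-modify-write against its own mask, so updating one field group can no longer clobber another; the old `set_flags` rebuilt the entry as `addr | flags` and dropped the memory-attribute bits in the process. A minimal standalone sketch of the pattern (hypothetical values):

```rust
const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;

// Same shape as PageTableEntry::modify_addr: clear only the
// output-address bits, then OR in the new (page-aligned) address.
fn modify_addr(entry: &mut u64, addr: u64) {
    *entry = (*entry & !ADDR_MASK) | (addr & ADDR_MASK);
}

fn main() {
    // VALID | TABLE_OR_PAGE (0b11) plus an output address.
    let mut entry: u64 = 0xdead_b000 | 0b11;
    modify_addr(&mut entry, 0xbeef_c000);
    assert_eq!(entry, 0xbeef_c000 | 0b11); // the flag bits survive
}
```
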
@@ -90,42 +132,78 @@ impl fmt::Debug for PageTableEntry {
     }
 }
 
+register_bitfields! {u64,
+    // Memory attribute fields in the VMSAv8-64 translation table format descriptors (pages 2148~2152)
+    MEMORY_ATTRIBUTE [
+        /// Shareability field
+        SH OFFSET(8) NUMBITS(2) [
+            NonShareable = 0b00,
+            OuterShareable = 0b10,
+            InnerShareable = 0b11
+        ],
+        /// Memory attributes index into the MAIR_EL1 register
+        AttrIndx OFFSET(2) NUMBITS(3) []
+    ]
+}
+
 bitflags! {
     /// Possible flags for a page table entry.
     pub struct PageTableFlags: u64 {
-        const ALL = 0xffffffff_ffffffff;
-        const TYPE_MASK = 3 << 0;
-        // const TYPE_FAULT = 0 << 0;
-        const TYPE_PAGE = 3 << 0;
-        const TABLE_BIT = 1 << 1;
-        // const BLOCK_BIT = 0 << 1;
-        const PAGE_BIT = 1 << 1;
-        const PRESENT = 1 << 0;
-        const USER_ACCESSIBLE = 1 << 6; /* AP[1] */
-        const RDONLY = 1 << 7;          /* AP[2] */
-        const SHARED = 3 << 8;          /* SH[1:0], inner shareable */
-        const BIT_8 = 1 << 8;
-        const BIT_9 = 1 << 9;
+        // const SHARED = 3 << 8;       /* SH[1:0], inner shareable */
+        // const BIT_8 = 1 << 8;
+        // const BIT_9 = 1 << 9;
+        // pub const ATTRIB_SH_NON_SHAREABLE: usize = 0x0 << 8;
 
-        const OUTER_SHAREABLE = 0b10 << 8;
-        const INNER_SHAREABLE = 0b11 << 8;
-        const ACCESSED = 1 << 10;       /* AF, Access Flag */
-        const NONE_GLOBAL = 1 << 11;    /* None Global */
-        const GLOBAL = (!(1 << 11));
-        const DBM = 1 << 51;            /* Dirty Bit Management */
-        const WRITE = 1 << 51;          /* DBM */
-        const CONT = 1 << 52;           /* Contiguous range */
-        const PXN = 1 << 53;            /* Privileged XN */
-        const UXN = 1 << 54;            /* User XN */
-        const HYP_XN = 1 << 54;         /* HYP XN */
+        // const OUTER_SHAREABLE = 0b10 << 8;
+        // const INNER_SHAREABLE = 0b11 << 8;
+        /// Identifies whether the descriptor is valid
+        const VALID = 1 << 0;
+        /// The descriptor type:
+        /// 0 = Block
+        /// 1 = Table/Page
+        const TABLE_OR_PAGE = 1 << 1;
+        /// Access permission: accessible at EL0
+        const AP_EL0 = 1 << 6;
+        /// Access permission: read-only
+        const AP_RO = 1 << 7;
+        /// Access flag
+        const AF = 1 << 10;
+        /// Not global bit
+        const nG = 1 << 11;
+        /// Dirty Bit Modifier
+        const DBM = 1 << 51;
+        /// A hint bit indicating that the translation table entry is one of a contiguous set of entries
+        const Contiguous = 1 << 52;
+        /// Privileged Execute-never
+        const PXN = 1 << 53;
+        /// Execute-never/Unprivileged execute-never
+        const XN = 1 << 54;
 
+        /// Software Dirty Bit Modifier
+        const WRITE = 1 << 51;
+        /// Software dirty bit
         const DIRTY = 1 << 55;
+        /// Software swapped bit
         const SWAPPED = 1 << 56;
-        const HUGE_PAGE = 1 << 57;
-        const PROT_NONE = 1 << 58;
+        /// Software writable shared bit for COW
+        const WRITABLE_SHARED = 1 << 57;
+        /// Software readonly shared bit for COW
+        const READONLY_SHARED = 1 << 58;
+        /// Privileged Execute-never for table descriptors
+        const PXNTable = 1 << 59;
+        /// Execute-never/Unprivileged execute-never for table descriptors
+        const XNTable = 1 << 60;
     }
 }
 
+impl Default for PageTableFlags {
+    fn default() -> Self {
+        Self::VALID | Self::TABLE_OR_PAGE | Self::AF | Self::WRITE | Self::PXN | Self::XN
+    }
+}
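With the renamed flags, the two low bits classify a descriptor exactly the way the new `frame()` reads them: bit 0 (`VALID`) says whether the descriptor is valid at all, and bit 1 (`TABLE_OR_PAGE`) distinguishes a block (huge page) from a table or page descriptor. A small sketch of that decision (the `kind` helper is made up for illustration):

```rust
const VALID: u64 = 1 << 0;
const TABLE_OR_PAGE: u64 = 1 << 1;

// Mirrors the checks in PageTableEntry::frame above.
fn kind(entry: u64) -> &'static str {
    if entry & VALID == 0 {
        "invalid"           // FrameError::FrameNotPresent
    } else if entry & TABLE_OR_PAGE == 0 {
        "block (huge page)" // FrameError::HugeFrame
    } else {
        "table or page"     // a 4 KiB frame can be returned
    }
}

fn main() {
    assert_eq!(kind(0b00), "invalid");
    assert_eq!(kind(0b01), "block (huge page)");
    assert_eq!(kind(0b11), "table or page");
}
```

The new `Default` impl bundles `VALID | TABLE_OR_PAGE | AF | WRITE | PXN | XN`, which the kernel hunks below substitute for the old hand-written `PRESENT | WRITE | ACCESSED | PAGE_BIT` lists.
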
@@ -151,12 +229,12 @@ impl PageTable {
         }
     }
 
-    /// Setup identity map: VirtPage at pagenumber -> PhysFrame at pagenumber
-    /// pn: pagenumber = addr >> 12 in riscv32.
-    pub fn map_identity(&mut self, p4num: usize, flags: PageTableFlags) {
-        let entry = self.entries[p4num].clone();
-        self.entries[p4num].set_addr(entry.addr(), flags);
-    }
+    // Setup identity map: VirtPage at pagenumber -> PhysFrame at pagenumber
+    // pn: pagenumber = addr >> 12 in riscv32.
+    // pub fn map_identity(&mut self, p4num: usize, flags: PageTableFlags) {
+    //     let entry = self.entries[p4num].clone();
+    //     self.entries[p4num].set_addr(entry.addr(), flags);
+    // }
 }
 
 impl Index<usize> for PageTable {

@@ -218,7 +218,7 @@ impl<'a> RecursivePageTable<'a> {
         if entry.is_unused() {
             if let Some(frame) = allocator.alloc() {
-                entry.set_frame(frame, Flags::PRESENT | Flags::WRITE | Flags::ACCESSED | Flags::PAGE_BIT);
+                entry.set_frame(frame, Flags::default());
                 created = true;
             } else {
                 return Err(MapToError::FrameAllocationFailed);
@@ -226,7 +226,8 @@ impl<'a> RecursivePageTable<'a> {
         } else {
             created = false;
         }
-        if entry.flags().contains(Flags::HUGE_PAGE) {
+        // is a huge page (block)
+        if !entry.flags().contains(Flags::TABLE_OR_PAGE) {
             return Err(MapToError::ParentEntryHugePage);
         }
@@ -382,7 +383,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
             return Err(FlagUpdateError::PageNotMapped);
         }
 
-        p1[page.p1_index()].set_flags(flags);
+        p1[page.p1_index()].modify_flags(flags);
 
         Ok(MapperFlush::new(page))
     }
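Note the quiet behavior change in `update_flags`: the old `set_flags` reconstructed the whole entry from `addr | flags`, whereas `modify_flags` touches only the bits in `FLAGS_MASK`, so the entry's output address and its shareability/AttrIndx fields now survive a permission update.
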

@@ -8,7 +8,7 @@ use ucore_memory::paging::*;
 use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
 use aarch64::{PhysAddr, VirtAddr};
 use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
-use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB, Size2MiB};
+use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB, Size2MiB};
 
 register_bitfields! {u64,
     // AArch64 Reference Manual page 2150
@@ -167,7 +167,7 @@ impl PageTable for ActivePageTable {
     type Entry = PageEntry;
 
     fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
-        let flags = EF::PRESENT | EF::WRITE | EF::ACCESSED | EF::UXN | EF::PAGE_BIT;
+        let flags = EF::default();
        self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForAarch64)
            .unwrap().flush();
        self.get_entry(addr)
@@ -224,49 +224,58 @@ impl Entry for PageEntry {
         tlb_invalidate(addr);
     }
 
-    fn present(&self) -> bool { self.0.flags().contains(EF::PRESENT) }
-    fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
+    fn present(&self) -> bool { self.0.flags().contains(EF::VALID) }
+    fn accessed(&self) -> bool { self.0.flags().contains(EF::AF) }
     fn writable(&self) -> bool { self.0.flags().contains(EF::WRITE) }
     fn dirty(&self) -> bool { self.hw_dirty() && self.sw_dirty() }
 
-    fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
+    fn clear_accessed(&mut self) { self.as_flags().remove(EF::AF); }
     fn clear_dirty(&mut self)
     {
         self.as_flags().remove(EF::DIRTY);
-        self.as_flags().insert(EF::RDONLY);
+        self.as_flags().insert(EF::AP_RO);
     }
     fn set_writable(&mut self, value: bool)
     {
-        self.as_flags().set(EF::RDONLY, !value);
+        self.as_flags().set(EF::AP_RO, !value);
         self.as_flags().set(EF::WRITE, value);
     }
-    fn set_present(&mut self, value: bool) { self.as_flags().set(EF::PRESENT, value); }
+    fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID, value); }
     fn target(&self) -> usize { self.0.addr().as_u64() as usize }
     fn set_target(&mut self, target: usize) {
-        let flags = self.0.flags();
-        self.0.set_addr(PhysAddr::new(target as u64), flags);
+        self.0.modify_addr(PhysAddr::new(target as u64));
     }
-    fn writable_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
-    fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
+    fn writable_shared(&self) -> bool { self.0.flags().contains(EF::WRITABLE_SHARED) }
+    fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::READONLY_SHARED) }
     fn set_shared(&mut self, writable: bool) {
         let flags = self.as_flags();
-        flags.set(EF::BIT_8, writable);
-        flags.set(EF::BIT_9, writable);
+        flags.set(EF::WRITABLE_SHARED, writable);
+        flags.set(EF::READONLY_SHARED, !writable);
     }
-    fn clear_shared(&mut self) { self.as_flags().remove(EF::BIT_8 | EF::BIT_9); }
-    fn user(&self) -> bool { self.0.flags().contains(EF::USER_ACCESSIBLE) }
+    fn clear_shared(&mut self) { self.as_flags().remove(EF::WRITABLE_SHARED | EF::READONLY_SHARED); }
+    fn user(&self) -> bool { self.0.flags().contains(EF::AP_EL0) }
     fn swapped(&self) -> bool { self.0.flags().contains(EF::SWAPPED) }
     fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::SWAPPED, value); }
     fn set_user(&mut self, value: bool) {
-        self.as_flags().set(EF::USER_ACCESSIBLE, value);
-        self.as_flags().set(EF::NONE_GLOBAL, value); // set non-global to use ASID
+        self.as_flags().set(EF::AP_EL0, value);
+        self.as_flags().set(EF::nG, value); // set non-global to use ASID
     }
-    fn execute(&self) -> bool { !self.0.flags().contains(EF::UXN) }
-    fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::UXN, !value); }
+    fn execute(&self) -> bool {
+        match self.user() {
+            true => !self.0.flags().contains(EF::XN),
+            false => !self.0.flags().contains(EF::PXN),
+        }
+    }
+    fn set_execute(&mut self, value: bool) {
+        match self.user() {
+            true => self.as_flags().set(EF::XN, !value),
+            false => self.as_flags().set(EF::PXN, !value),
+        }
+    }
 }
 
 impl PageEntry {
-    fn read_only(&self) -> bool { self.0.flags().contains(EF::RDONLY) }
+    fn read_only(&self) -> bool { self.0.flags().contains(EF::AP_RO) }
     fn hw_dirty(&self) -> bool { self.writable() && !self.read_only() }
     fn sw_dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
     fn as_flags(&mut self) -> &mut EF {
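The execute-permission accessors also become privilege-aware: instead of always toggling `UXN`, the new `execute`/`set_execute` pick the bit by the entry's user flag, `XN` (bit 54) for user-accessible pages and `PXN` (bit 53) for kernel-only pages.
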
@@ -294,7 +303,7 @@ impl InactivePageTable for InactivePageTable0 {
         active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
             table.zero();
             // set up recursive mapping for the table
-            table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::PRESENT | EF::WRITE | EF::ACCESSED | EF::PAGE_BIT);
+            table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default());
         });
         InactivePageTable0 { p4_frame: frame }
     }
@@ -304,7 +313,7 @@ impl InactivePageTable for InactivePageTable0 {
         let backup = p4_table[RECURSIVE_INDEX].clone();
 
         // overwrite recursive mapping
-        p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITE | EF::ACCESSED | EF::PAGE_BIT);
+        p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default());
         tlb_invalidate_all();
 
         // execute f in the new context
@@ -367,7 +376,7 @@ impl InactivePageTable0 {
         assert!(!e0.is_unused());
 
         self.edit(|_| {
-            table[KERNEL_PML4].set_addr(e0.addr(), e0.flags() & EF::GLOBAL);
+            table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default());
         });
     }
 }

@@ -136,7 +136,7 @@ mod aarch64 {
     pub const KERNEL_PML4: usize = 0;
     pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024;
     pub const MEMORY_OFFSET: usize = 0;
-    pub const USER_STACK_OFFSET: usize = 0xffff_ffff_0000_0000;
+    pub const USER_STACK_OFFSET: usize = 0xffff_8000_0000_0000;
     pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024;
     pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
 }
