aarch64/mmu: add memory region attribute config

branch: master
author: equation314 (6 years ago)
commit: a8b334123a (parent: f9e47b2fd8)

@ -0,0 +1,64 @@
//! Memory region attributes (AArch64 Reference Manual, D4.5, page 2174)
use super::{PageTableAttribute, MEMORY_ATTRIBUTE};
use regs::*;
pub trait MairType {
const INDEX: u64;
#[inline]
fn config_value() -> u64;
#[inline]
fn attr_value() -> PageTableAttribute;
}
pub enum MairDevice {}
pub enum MairNormal {}
pub enum MairNormalNonCacheable {}
impl MairType for MairDevice {
const INDEX: u64 = 0;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Device + MAIR_ATTR::Attr_LOW_DEVICE::Device_nGnRE).value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::OuterShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}
impl MairType for MairNormal {
const INDEX: u64 = 1;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc
+ MAIR_ATTR::Attr_LOW_MEMORY::InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc)
.value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::InnerShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}
impl MairType for MairNormalNonCacheable {
const INDEX: u64 = 2;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Memory_OuterNonCacheable
+ MAIR_ATTR::Attr_LOW_MEMORY::InnerNonCacheable)
.value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::NonShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}
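As a usage sketch (not part of this commit, mirroring the `init_mmu_early` change later in this diff): each type's `config_value()` is packed into the matching `Attr<n>` slot of `MAIR_EL1`, and `attr_value()` supplies the corresponding `AttrIndx` and shareability bits for a descriptor.
// Program MAIR_EL1 so that index 0 = device, 1 = normal, 2 = normal non-cacheable.
MAIR_EL1.write(
    MAIR_EL1::Attr0.val(MairDevice::config_value())
        + MAIR_EL1::Attr1.val(MairNormal::config_value())
        + MAIR_EL1::Attr2.val(MairNormalNonCacheable::config_value()),
);
// A page table entry then selects one of these slots via its AttrIndx bits,
// e.g. entry.set_frame(frame, flags, MairNormal::attr_value()) tags it with index 1.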

@ -18,6 +18,8 @@ mod frame_alloc;
mod page_table;
mod recursive;
pub mod memory_attribute;
/// Trait for abstracting over the three possible block/page sizes on aarch64: 4KiB, 2MiB, and 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
/// The page size in bytes.
@ -366,6 +368,10 @@ impl<S: PageSize> PhysFrame<S> {
pub fn of_addr(address: usize) -> Self {
Self::containing_address(PhysAddr::new(address as u64))
}
pub fn range_of(begin: usize, end: usize) -> PhysFrameRange<S> {
Self::range(Self::of_addr(begin), Self::of_addr(end - 1) + 1)
}
}
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
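A small usage sketch for the new helper (addresses are illustrative, taken from the MMIO window used elsewhere in this commit; this assumes `PhysFrameRange` iterates like the `PageRange` used below):
// Iterate the 4 KiB frames covering [0x3F00_0000, 0x4000_0000).
for frame in PhysFrame::<Size4KiB>::range_of(0x3F00_0000, 0x4000_0000) {
    // each `frame` starts at a 4 KiB-aligned physical address
    let _ = frame.start_address();
}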

@ -1,7 +1,7 @@
use core::fmt;
use core::ops::{Index, IndexMut};
use super::PhysFrame;
use super::{PhysFrame, PageSize};
use addr::PhysAddr;
use usize_conversions::usize_from;
@ -19,8 +19,7 @@ const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
const FLAGS_MASK: u64 = !(MEMORY_ATTR_MASK | ADDR_MASK);
/// Memory attribute fields
type PageTableAttributeFieldValue = FieldValue<u64, MEMORY_ATTRIBUTE::Register>;
pub struct PageTableAttribute(PageTableAttributeFieldValue);
pub type PageTableAttribute = FieldValue<u64, MEMORY_ATTRIBUTE::Register>;
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
@ -53,32 +52,33 @@ impl RegisterReadWrite<u64, MEMORY_ATTRIBUTE::Register> for PageTableEntry {
impl PageTableEntry {
/// Returns whether this entry is zero.
#[inline]
pub fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
#[inline]
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
#[inline]
pub fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.entry)
}
/// Returns the physical address mapped by this entry (might be zero).
#[inline]
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & ADDR_MASK)
}
/// Returns the memory attribute fields of this entry.
#[inline]
pub fn attr(&self) -> PageTableAttribute {
PageTableAttribute(PageTableAttributeFieldValue::new(
MEMORY_ATTR_MASK,
0,
self.entry & MEMORY_ATTR_MASK,
))
PageTableAttribute::new(MEMORY_ATTR_MASK, 0, self.entry & MEMORY_ATTR_MASK)
}
/// Returns the physical frame mapped by this entry.
@ -99,11 +99,19 @@ impl PageTableEntry {
}
}
/// Map the entry to the specified physical frame with the specified flags.
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
// is not a huge page (block)
/// Map the entry to the specified physical frame with the specified flags and memory attribute.
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags, attr: PageTableAttribute) {
// is not a block
assert!(flags.contains(PageTableFlags::TABLE_OR_PAGE));
self.set(frame.start_address().as_u64() | flags.bits());
self.set(frame.start_address().as_u64() | flags.bits() | attr.value);
}
/// The descriptor gives the base address of a block of memory, and the attributes for that
/// memory region.
pub fn set_block<S: PageSize>(&mut self, addr: PhysAddr, flags: PageTableFlags, attr: PageTableAttribute) {
// is a block
assert!(!flags.contains(PageTableFlags::TABLE_OR_PAGE));
self.set(addr.align_down(S::SIZE).as_u64() | flags.bits() | attr.value);
}
/// Map the entry to the specified physical address with the specified flags.
@ -118,7 +126,7 @@ impl PageTableEntry {
/// Sets the memory attribute fields of this entry.
pub fn modify_attr(&mut self, attr: PageTableAttribute) {
self.entry = (self.entry & !MEMORY_ATTR_MASK) | attr.0.value;
self.entry = (self.entry & !MEMORY_ATTR_MASK) | attr.value;
}
}
@ -128,6 +136,7 @@ impl fmt::Debug for PageTableEntry {
f.field("value", &self.entry);
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.field("attr", &self.attr().value);
f.finish()
}
}
@ -150,14 +159,6 @@ register_bitfields! {u64,
bitflags! {
/// Possible flags for a page table entry.
pub struct PageTableFlags: u64 {
// const SHARED = 3 << 8; /* SH[1:0], inner shareable */
// const BIT_8 = 1 << 8;
// const BIT_9 = 1 << 9;
// pub const ATTRIB_SH_NON_SHAREABLE: usize = 0x0 << 8;
// const OUTER_SHAREABLE = 0b10 << 8;
// const INNER_SHAREABLE = 0b11 << 8;
/// identifies whether the descriptor is valid
const VALID = 1 << 0;
/// the descriptor type
@ -202,6 +203,7 @@ bitflags! {
}
impl Default for PageTableFlags {
#[inline]
fn default() -> Self {
Self::VALID | Self::TABLE_OR_PAGE | Self::AF | Self::WRITE | Self::PXN | Self::XN
}
@ -228,13 +230,6 @@ impl PageTable {
entry.set_unused();
}
}
// Setup identity map: VirtPage at pagenumber -> PhysFrame at pagenumber
// pn: pagenumber = addr>>12 in riscv32.
// pub fn map_identity(&mut self, p4num: usize, flags: PageTableFlags) {
// let entry = self.entries[p4num].clone();
// self.entries[p4num].set_addr(entry.addr(), flags);
// }
}
impl Index<usize> for PageTable {
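A hedged sketch of how the two setters are meant to be used (not from this commit; `p1`, `p2`, `index`, `frame`, and `paddr` are assumed from surrounding code, as in the kernel changes further down):
// Page or next-level-table descriptor: TABLE_OR_PAGE stays set, attribute passed explicitly.
p1[index].set_frame(frame, PageTableFlags::default(), MairNormal::attr_value());
// 2 MiB block descriptor: TABLE_OR_PAGE is cleared and the address is aligned down to 2 MiB.
p2[index].set_block::<Size2MiB>(
    paddr,
    PageTableFlags::default() - PageTableFlags::TABLE_OR_PAGE,
    MairDevice::attr_value(),
);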

@ -5,7 +5,7 @@ use paging::{
page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
NotGiantPageSize, Page, PageSize, PhysFrame, Size4KiB,
};
use paging::page_table::PageTableFlags as Flags;
use paging::{page_table::PageTableFlags as Flags, PageTableAttribute, memory_attribute::*};
use asm::{ttbr_el1_read, tlb_invalidate};
use barrier;
use ux::u9;
@ -46,6 +46,7 @@ pub trait Mapper<S: PageSize> {
page: Page<S>,
frame: PhysFrame<S>,
flags: PageTableFlags,
attr: PageTableAttribute,
allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError>
where
@ -71,6 +72,7 @@ pub trait Mapper<S: PageSize> {
&mut self,
frame: PhysFrame<S>,
flags: PageTableFlags,
attr: PageTableAttribute,
allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError>
where
@ -79,7 +81,7 @@ pub trait Mapper<S: PageSize> {
Self: Mapper<S>,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
self.map_to(page, frame, flags, allocator)
self.map_to(page, frame, flags, attr, allocator)
}
}
@ -218,7 +220,7 @@ impl<'a> RecursivePageTable<'a> {
if entry.is_unused() {
if let Some(frame) = allocator.alloc() {
entry.set_frame(frame, Flags::default());
entry.set_frame(frame, Flags::default(), MairNormal::attr_value());
created = true;
} else {
return Err(MapToError::FrameAllocationFailed);
@ -289,6 +291,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
attr: PageTableAttribute,
allocator: &mut A,
) -> Result<MapperFlush<Size4KiB>, MapToError>
where
@ -309,7 +312,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
if !p1[page.p1_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped);
}
p1[page.p1_index()].set_frame(frame, flags);
p1[page.p1_index()].set_frame(frame, flags, attr);
Ok(MapperFlush::new(page))
}
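With the extra parameter, callers now pass a memory attribute alongside the flags. A minimal sketch mirroring the kernel-side call later in this diff (`rec_page_table`, `page`, `frame`, and `allocator` are assumed to exist):
rec_page_table
    .map_to(page, frame, PageTableFlags::default(), MairNormal::attr_value(), &mut allocator)
    .unwrap()
    .flush();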

@ -24,72 +24,49 @@ use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
MAIR_EL1 [
// TODO: Macrofy this
/// Attribute 7
Attr7_HIGH OFFSET(60) NUMBITS(4) [],
Attr7_LOW_DEVICE OFFSET(56) NUMBITS(4) [],
Attr7_LOW_MEMORY OFFSET(56) NUMBITS(4) [],
Attr7 OFFSET(56) NUMBITS(8) [],
/// Attribute 6
Attr6_HIGH OFFSET(52) NUMBITS(4) [],
Attr6_LOW_DEVICE OFFSET(48) NUMBITS(4) [],
Attr6_LOW_MEMORY OFFSET(48) NUMBITS(4) [],
Attr6 OFFSET(48) NUMBITS(8) [],
/// Attribute 5
Attr5_HIGH OFFSET(44) NUMBITS(4) [],
Attr5_LOW_DEVICE OFFSET(40) NUMBITS(4) [],
Attr5_LOW_MEMORY OFFSET(40) NUMBITS(4) [],
Attr5 OFFSET(40) NUMBITS(8) [],
/// Attribute 4
Attr4_HIGH OFFSET(36) NUMBITS(4) [],
Attr4_LOW_DEVICE OFFSET(32) NUMBITS(4) [],
Attr4_LOW_MEMORY OFFSET(32) NUMBITS(4) [],
Attr4 OFFSET(32) NUMBITS(8) [],
/// Attribute 3
Attr3_HIGH OFFSET(28) NUMBITS(4) [],
Attr3_LOW_DEVICE OFFSET(24) NUMBITS(4) [],
Attr3_LOW_MEMORY OFFSET(24) NUMBITS(4) [],
Attr3 OFFSET(24) NUMBITS(8) [],
/// Attribute 2
Attr2_HIGH OFFSET(20) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr2_LOW_DEVICE OFFSET(16) NUMBITS(4) [
Device_nGnRE = 0b0100
],
Attr2_LOW_MEMORY OFFSET(16) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr2 OFFSET(16) NUMBITS(8) [],
/// Attribute 1
Attr1_HIGH OFFSET(12) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr1_LOW_DEVICE OFFSET(8) NUMBITS(4) [
Device_nGnRE = 0b0100
],
Attr1_LOW_MEMORY OFFSET(8) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr1 OFFSET(8) NUMBITS(8) [],
/// Attribute 0
Attr0_HIGH OFFSET(4) NUMBITS(4) [
Attr0 OFFSET(0) NUMBITS(8) []
]
}
register_bitfields! {u64,
MAIR_ATTR [
Attr_HIGH OFFSET(4) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteThrough_NonTransient_ReadAlloc_WriteAlloc = 0b1011,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr0_LOW_DEVICE OFFSET(0) NUMBITS(4) [
Device_nGnRE = 0b0100
Attr_LOW_DEVICE OFFSET(0) NUMBITS(4) [
Device_nGnRnE = 0b0000,
Device_nGnRE = 0b0100,
Device_nGRE = 0b1000,
Device_GRE = 0b1100
],
Attr0_LOW_MEMORY OFFSET(0) NUMBITS(4) [
Attr_LOW_MEMORY OFFSET(0) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteThrough_NonTransient_ReadAlloc_WriteAlloc = 0b1011,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
]
]
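For reference, the 8-bit encodings these fields compose to for the three attribute kinds defined in memory_attribute.rs (per the ARMv8 memory attribute encoding tables; shown only as a worked example):
// Device-nGnRE:                      Attr_HIGH = 0b0000, Attr_LOW_DEVICE = 0b0100  => 0x04
// Normal, write-back non-transient,
// read/write-allocate (inner+outer): Attr_HIGH = 0b1111, Attr_LOW_MEMORY = 0b1111  => 0xFF
// Normal, non-cacheable:             Attr_HIGH = 0b0100, Attr_LOW_MEMORY = 0b0100  => 0x44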

@ -40,7 +40,7 @@ pub use self::daif::DAIF;
pub use self::elr_el2::ELR_EL2;
pub use self::hcr_el2::HCR_EL2;
pub use self::id_aa64mmfr0_el1::ID_AA64MMFR0_EL1;
pub use self::mair_el1::MAIR_EL1;
pub use self::mair_el1::{MAIR_EL1, MAIR_ATTR};
pub use self::mpidr_el1::MPIDR_EL1;
pub use self::sctlr_el1::SCTLR_EL1;
pub use self::sp::SP;

@ -6,6 +6,8 @@ pub mod irq;
pub mod timer;
pub mod serial;
pub const IO_BASE: usize = bcm2837::IO_BASE;
pub fn init() {
assert_has_not_been_called!("board::init must be called only once");

@ -3,7 +3,8 @@
use ucore_memory::PAGE_SIZE;
use memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Stack};
use super::atags::atags::Atags;
use aarch64::{barrier, regs::*, addr::*, paging::PhysFrame as Frame};
use aarch64::{barrier, regs::*, addr::*};
use aarch64::paging::{PhysFrame as Frame, memory_attribute::*};
/// Memory initialization.
pub fn init() {
@ -28,13 +29,11 @@ pub fn init_mmu_early() {
// device.
MAIR_EL1.write(
// Attribute 1
MAIR_EL1::Attr1_HIGH::Device
+ MAIR_EL1::Attr1_LOW_DEVICE::Device_nGnRE
// Attribute 0
+ MAIR_EL1::Attr0_HIGH::Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc
+ MAIR_EL1::Attr0_LOW_MEMORY::InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc,
MAIR_EL1::Attr0.val(MairDevice::config_value()) +
MAIR_EL1::Attr1.val(MairNormal::config_value()) +
MAIR_EL1::Attr2.val(MairNormalNonCacheable::config_value()),
);
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(

@ -8,62 +8,8 @@ use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB, Size2MiB};
register_bitfields! {u64,
// AArch64 Reference Manual page 2150
STAGE1_DESCRIPTOR [
/// Execute-never
XN OFFSET(54) NUMBITS(1) [
False = 0,
True = 1
],
/// Various address fields, depending on use case
LVL4_OUTPUT_ADDR_4KiB OFFSET(39) NUMBITS(9) [], // [47:39]
LVL3_OUTPUT_ADDR_4KiB OFFSET(30) NUMBITS(18) [], // [47:30]
LVL2_OUTPUT_ADDR_4KiB OFFSET(21) NUMBITS(27) [], // [47:21]
NEXT_LVL_TABLE_ADDR_4KiB OFFSET(12) NUMBITS(36) [], // [47:12]
/// Access flag
AF OFFSET(10) NUMBITS(1) [
False = 0,
True = 1
],
/// Shareability field
SH OFFSET(8) NUMBITS(2) [
OuterShareable = 0b10,
InnerShareable = 0b11
],
/// Access Permissions
AP OFFSET(6) NUMBITS(2) [
RW_EL1 = 0b00,
RW_EL1_EL0 = 0b01,
RO_EL1 = 0b10,
RO_EL1_EL0 = 0b11
],
/// Memory attributes index into the MAIR_EL1 register
AttrIndx OFFSET(2) NUMBITS(3) [],
TYPE OFFSET(1) NUMBITS(1) [
Block = 0,
Table = 1
],
VALID OFFSET(0) NUMBITS(1) [
False = 0,
True = 1
]
]
}
mod mair {
pub const NORMAL: u64 = 0;
pub const DEVICE: u64 = 1;
}
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB, Size2MiB, Size1GiB};
use aarch64::paging::memory_attribute::*;
// needs 3 pages (frame_lvl4, frame_lvl3, frame_lvl2)
pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame) {
@ -74,34 +20,24 @@ pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame)
p3.zero();
p2.zero();
// Fill the rest of the LVL2 (2MiB) entries as block
// descriptors. Differentiate between normal and device mem.
const MMIO_BASE: u64 = 0x3F000000;
let mmio_base: u64 = MMIO_BASE >> 21;
let mut common = STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::AP::RW_EL1
+ STAGE1_DESCRIPTOR::AF::True;
// + STAGE1_DESCRIPTOR::XN::True;
for i in 0..512 {
let j: u64 = i as u64;
let mem_attr = if j >= mmio_base {
STAGE1_DESCRIPTOR::SH::OuterShareable + STAGE1_DESCRIPTOR::AttrIndx.val(mair::DEVICE)
} else {
STAGE1_DESCRIPTOR::SH::InnerShareable + STAGE1_DESCRIPTOR::AttrIndx.val(mair::NORMAL)
};
let (start_addr, end_addr) = (0, 0x40000000);
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::XN;
for page in Page::<Size2MiB>::range_of(start_addr, end_addr) {
let paddr = PhysAddr::new(page.start_address().as_u64());
p2[i].entry = (common + mem_attr + STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(j)).value;
use arch::board::IO_BASE;
if paddr.as_u64() >= IO_BASE as u64 {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
} else {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
}
}
common = common + STAGE1_DESCRIPTOR::SH::InnerShareable + STAGE1_DESCRIPTOR::AttrIndx.val(mair::NORMAL);
p3[0].set_frame(frame_lvl2, EF::default(), MairNormal::attr_value());
p3[1].set_block::<Size1GiB>(PhysAddr::new(0x40000000), block_flags | EF::PXN, MairDevice::attr_value());
p3[0].entry = (common + STAGE1_DESCRIPTOR::TYPE::Table + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(frame_lvl2.start_address().as_u64() >> 12)).value;
p3[1].entry = (common + STAGE1_DESCRIPTOR::LVL3_OUTPUT_ADDR_4KiB.val(1)).value;
p4[0].entry = (common + STAGE1_DESCRIPTOR::TYPE::Table + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(frame_lvl3.start_address().as_u64() >> 12)).value;
p4[RECURSIVE_INDEX].entry = (common + STAGE1_DESCRIPTOR::TYPE::Table + STAGE1_DESCRIPTOR::NEXT_LVL_TABLE_ADDR_4KiB.val(frame_lvl4.start_address().as_u64() >> 12)).value;
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
// warn!("p2");
// for i in 0..512 {
@ -129,18 +65,10 @@ pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame)
/// Map the range [start_addr, end_addr) as device memory and insert it into the MemorySet.
pub fn remap_device_2mib(ms: &mut MemorySet<InactivePageTable0>, start_addr: usize, end_addr: usize) {
ms.edit(|active_table| {
let common = STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::AP::RW_EL1
+ STAGE1_DESCRIPTOR::AF::True
+ STAGE1_DESCRIPTOR::XN::True;
let mem_attr = STAGE1_DESCRIPTOR::SH::OuterShareable + STAGE1_DESCRIPTOR::AttrIndx.val(mair::DEVICE);
type Page2MiB = Page<Size2MiB>;
for page in Page2MiB::range_of(start_addr, end_addr) {
for page in Page::<Size2MiB>::range_of(start_addr, end_addr) {
let paddr = PhysAddr::new(page.start_address().as_u64());
let p2 = unsafe { &mut *active_table.0.p2_ptr(page) };
p2[page.p2_index()].entry = (common + mem_attr + STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(page.start_address().as_u64() >> 21)).value;
p2[page.p2_index()].set_block::<Size2MiB>(paddr, EF::default() - EF::TABLE_OR_PAGE, MairDevice::attr_value());
}
// let p2 = unsafe { &mut *(0o777_777_000_000_0000 as *mut Aarch64PageTable) };
@ -168,7 +96,8 @@ impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
let flags = EF::default();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForAarch64)
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
.unwrap().flush();
self.get_entry(addr)
}
@ -303,7 +232,7 @@ impl InactivePageTable for InactivePageTable0 {
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default());
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value());
});
InactivePageTable0 { p4_frame: frame }
}
@ -313,7 +242,7 @@ impl InactivePageTable for InactivePageTable0 {
let backup = p4_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default());
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default(), MairNormal::attr_value());
tlb_invalidate_all();
// execute f in the new context
@ -376,7 +305,7 @@ impl InactivePageTable0 {
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default());
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
}
