aarch64/mmu: use both TTBR0_EL1 & TTBR1_EL1

master
equation314 6 years ago
parent bb1c1abaa4
commit 55087fc5a2

@ -6,6 +6,15 @@ use bit_field::BitField;
use usize_conversions::FromUsize;
use ux::*;
/// Which half of the 64-bit virtual address space an address belongs to,
/// determined by its top 16 bits. AArch64 splits the VA space: the bottom
/// range is translated via TTBR0_EL1, the top range via TTBR1_EL1.
///
/// Derives `Clone`/`Copy`/`PartialEq`/`Eq` so the range tag can be freely
/// compared and passed by value (it is a fieldless `#[repr(u8)]` enum).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum VirtAddrRange {
    /// 0x0000000000000000 to 0x0000FFFFFFFFFFFF (translated via TTBR0_EL1).
    BottomRange = 0,
    /// 0xFFFF000000000000 to 0xFFFFFFFFFFFFFFFF (translated via TTBR1_EL1).
    TopRange = 1,
}
/// A 64-bit AArch64 virtual address, wrapped as a transparent newtype over
/// the raw `u64` bit pattern.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtAddr(u64);
@ -105,6 +114,20 @@ impl VirtAddr {
u12::new((self.0 & 0xfff).try_into().unwrap())
}
/// Classifies this address into the bottom or top VA range by inspecting
/// its upper 16 bits. Addresses whose top bits are neither all zeros nor
/// all ones are not canonical and yield `VirtAddrNotValid`.
pub fn va_range(&self) -> Result<VirtAddrRange, VirtAddrNotValid> {
    let top_bits = self.va_range_bits();
    if top_bits == 0x0000 {
        Ok(VirtAddrRange::BottomRange)
    } else if top_bits == 0xffff {
        Ok(VirtAddrRange::TopRange)
    } else {
        Err(VirtAddrNotValid(self.0))
    }
}
/// Extracts bits 48..64 of the address — the bits that select between the
/// TTBR0 (all zeros) and TTBR1 (all ones) translation regimes.
pub fn va_range_bits(&self) -> u16 {
    // The cast to u16 truncates to the low 16 bits, so no mask is needed
    // after shifting the top 16 bits down.
    (self.0 >> 48) as u16
}
/// Returns the 9-bit level 1 page table index.
pub fn p1_index(&self) -> u9 {
u9::new(((self.0 >> 12) & 0o777).try_into().unwrap())

@ -160,24 +160,20 @@ pub fn eret() -> ! {
}
}
bitflags! {
/// Attribute flag bits carried in TTBR0_EL1 alongside the translation-table
/// base address (currently only the Common-not-Private bit, CnP, bit 0).
pub struct ttbr0_el1_Flags: u64 {
const COMMON_NOT_PRIVATE = 1 << 0;
}
}
/// Reads TTBR0_EL1 and splits the raw value into the page-table base frame
/// and the remaining flag bits.
pub fn ttbr0_el1_read() -> (PhysFrame, ttbr0_el1_Flags) {
let value = TTBR0_EL1.get();
let flags = ttbr0_el1_Flags::from_bits_truncate(value);
// Mask keeps only bits 12..48 — the 4 KiB-aligned table base address —
// discarding the flag/ASID bits.
let addr = PhysAddr::new(value & 0x_0000_ffff_ffff_f000);
let frame = PhysFrame::containing_address(addr);
(frame, flags)
}
pub fn ttbr0_el1_write(frame: PhysFrame) {
let addr = frame.start_address();
let value = addr.as_u64();
TTBR0_EL1.set_baddr(value);
pub fn ttbr_el1_read(which: u8) -> (PhysFrame) {
let baddr = match which {
0 => TTBR0_EL1.get_baddr(),
1 => TTBR1_EL1.get_baddr(),
_ => 0,
};
PhysFrame::containing_address(PhysAddr::new(baddr))
}
/// Programs `frame` as the translation-table base of TTBR0_EL1
/// (`which == 0`) or TTBR1_EL1 (`which == 1`); any other value is ignored.
pub fn ttbr_el1_write(which: u8, frame: PhysFrame) {
    let baddr = frame.start_address().as_u64();
    if which == 0 {
        TTBR0_EL1.set_baddr(baddr);
    } else if which == 1 {
        TTBR1_EL1.set_baddr(baddr);
    }
}

@ -98,6 +98,11 @@ impl<S: PageSize> Page<S> {
S::SIZE
}
/// Returns the top 16 bits of this page's start address, identifying
/// whether it lies in the TTBR0 (0x0000) or TTBR1 (0xffff) VA range.
/// (Previous comment wrongly said "level 4 page table index" — a
/// copy-paste from `p4_index`.)
pub fn va_range_bits(&self) -> u16 {
self.start_address().va_range_bits()
}
/// Returns the level 4 page table index of this page.
pub fn p4_index(&self) -> u9 {
self.start_address().p4_index()

@ -7,7 +7,7 @@ use paging::{
NotGiantPageSize, Page, PageSize, PhysFrame, Size4KiB,
};
use paging::page_table::PageTableFlags as Flags;
use asm::ttbr0_el1_read;
use asm::ttbr_el1_read;
use ux::u9;
use addr::{PhysAddr, VirtAddr};
@ -162,7 +162,9 @@ impl<'a> RecursivePageTable<'a> {
{
return Err(NotRecursivelyMapped);
}
if Ok(ttbr0_el1_read().0) != table[recursive_index].frame() {
if Ok(ttbr_el1_read(page.start_address().va_range().unwrap() as u8)) !=
table[recursive_index].frame()
{
return Err(NotRecursivelyMapped);
}
@ -212,7 +214,6 @@ impl<'a> RecursivePageTable<'a> {
where
A: FrameAllocator<Size4KiB>,
{
let created;
if entry.is_unused() {
@ -281,7 +282,6 @@ impl<'a> RecursivePageTable<'a> {
}
}
impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
@ -293,7 +293,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
where
A: FrameAllocator<Size4KiB>,
{
let self_mut = unsafe{ &mut *(self as *const _ as *mut Self) };
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
let p3_page = self.p3_page(page);
@ -317,7 +317,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
&mut self,
page: Page<Size4KiB>,
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
let self_mut = unsafe{ &mut *(self as *const _ as *mut Self) };
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
let p4_entry = &p4[page.p4_index()];
@ -357,7 +357,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
let self_mut = unsafe{ &mut *(self as *const _ as *mut Self) };
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
if p4[page.p4_index()].is_unused() {
@ -388,7 +388,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
}
fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
let self_mut = unsafe{ &mut *(self as *const _ as *mut Self) };
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
if p4[page.p4_index()].is_unused() {

@ -24,6 +24,7 @@ mod spsel;
mod spsr_el2;
mod tcr_el1;
mod ttbr0_el1;
mod ttbr1_el1;
// Export only the R/W traits and the static reg definitions
pub use register::cpu::*;
@ -49,3 +50,4 @@ pub use self::spsel::SPSel;
pub use self::spsr_el2::SPSR_EL2;
pub use self::tcr_el1::TCR_EL1;
pub use self::ttbr0_el1::TTBR0_EL1;
pub use self::ttbr1_el1::TTBR1_EL1;

@ -22,6 +22,17 @@ use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
TCR_EL1 [
/// Top Byte ignored - indicates whether the top byte of an address is
/// used for address match for the TTBR1_EL1 region, or ignored and used
/// for tagged addresses. Defined values are:
///
/// 0 Top Byte used in the address calculation.
/// 1 Top Byte ignored in the address calculation.
TBI1 OFFSET(38) NUMBITS(1) [
Used = 0,
Ignored = 1
],
/// Top Byte ignored - indicates whether the top byte of an address is
/// used for address match for the TTBR0_EL1 region, or ignored and used
/// for tagged addresses. Defined values are:
@ -33,6 +44,20 @@ register_bitfields! {u64,
Ignored = 1
],
/// ASID Size. Defined values are:
///
/// 0 8 bit - the upper 8 bits of TTBR0_EL1 and TTBR1_EL1 are ignored by
/// hardware for every purpose except reading back the register, and are
/// treated as if they are all zeros when used for allocation and matching entries in the TLB.
/// 1 16 bit - the upper 16 bits of TTBR0_EL1 and TTBR1_EL1 are used for
/// allocation and matching in the TLB.
///
/// If the implementation has only 8 bits of ASID, this field is RES0.
AS OFFSET(36) NUMBITS(1) [
Bits_8 = 0,
Bits_16 = 1
],
/// Intermediate Physical Address Size.
///
/// 000 32 bits, 4GiB.
@ -67,6 +92,116 @@ register_bitfields! {u64,
Bits_52 = 0b110
],
/// Granule size for the TTBR1_EL1.
///
/// 01 16KiB
/// 10 4KiB
/// 11 64KiB
///
/// Other values are reserved.
///
/// If the value is programmed to either a reserved value, or a size
/// that has not been implemented, then the hardware will treat the
/// field as if it has been programmed to an IMPLEMENTATION DEFINED
/// choice of the sizes that has been implemented for all purposes other
/// than the value read back from this register.
///
/// It is IMPLEMENTATION DEFINED whether the value read back is the
/// value programmed or the value that corresponds to the size chosen.
TG1 OFFSET(30) NUMBITS(2) [
KiB_4 = 0b10,
KiB_16 = 0b01,
KiB_64 = 0b11
],
/// Shareability attribute for memory associated with translation table
/// walks using TTBR1_EL1.
///
/// 00 Non-shareable
/// 10 Outer Shareable
/// 11 Inner Shareable
///
/// Other values are reserved.
SH1 OFFSET(28) NUMBITS(2) [
None = 0b00,
Outer = 0b10,
Inner = 0b11
],
/// Outer cacheability attribute for memory associated with translation
/// table walks using TTBR1_EL1.
///
/// 00 Normal memory, Outer Non-cacheable
///
/// 01 Normal memory, Outer Write-Back Read-Allocate Write-Allocate
/// Cacheable
///
/// 10 Normal memory, Outer Write-Through Read-Allocate No
/// Write-Allocate Cacheable
///
/// 11 Normal memory, Outer Write-Back Read-Allocate No Write-Allocate
/// Cacheable
ORGN1 OFFSET(26) NUMBITS(2) [
NonCacheable = 0b00,
WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
],
/// Inner cacheability attribute for memory associated with translation
/// table walks using TTBR1_EL1.
///
/// 00 Normal memory, Inner Non-cacheable
///
/// 01 Normal memory, Inner Write-Back Read-Allocate Write-Allocate
/// Cacheable
///
/// 10 Normal memory, Inner Write-Through Read-Allocate No
/// Write-Allocate Cacheable
///
/// 11 Normal memory, Inner Write-Back Read-Allocate No Write-Allocate
/// Cacheable
IRGN1 OFFSET(24) NUMBITS(2) [
NonCacheable = 0b00,
WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
],
/// Translation table walk disable for translations using
/// TTBR1_EL1. This bit controls whether a translation table walk is
/// performed on a TLB miss, for an address that is translated using
/// TTBR1_EL1. The encoding of this bit is:
///
/// 0 Perform translation table walks using TTBR1_EL1.
///
/// 1 A TLB miss on an address that is translated using TTBR1_EL1
/// generates a Translation fault. No translation table walk is
/// performed.
EPD1 OFFSET(23) NUMBITS(1) [
EnableTTBR1Walks = 0,
DisableTTBR1Walks = 1
],
/// Selects whether TTBR0_EL1 or TTBR1_EL1 defines the ASID. The encoding
/// of this bit is:
///
/// 0 TTBR0_EL1.ASID defines the ASID.
///
/// 1 TTBR1_EL1.ASID defines the ASID.
A1 OFFSET(22) NUMBITS(1) [
UseTTBR0ASID = 0b0,
UseTTBR1ASID = 0b1
],
/// The size offset of the memory region addressed by TTBR1_EL1. The
/// region size is 2^(64-T1SZ) bytes.
///
/// The maximum and minimum possible values for T1SZ depend on the level
/// of translation table and the memory translation granule size, as
/// described in the AArch64 Virtual Memory System Architecture chapter.
T1SZ OFFSET(16) NUMBITS(6) [],
/// Granule size for the TTBR0_EL1.
///
/// 00 4KiB

@ -47,6 +47,11 @@ impl RegisterReadWrite<u64, TTBR0_EL1::Register> for Reg {
}
impl Reg {
/// Returns the translation-table base address held in TTBR0_EL1.
// NOTE(review): the field value is shifted left by 1, which presumably
// means BADDR occupies bits 47:1 of the register (mirroring TTBR1_EL1's
// definition) — confirm against this file's TTBR0_EL1 field definition.
#[inline]
pub fn get_baddr(&self) -> u64 {
self.read(TTBR0_EL1::BADDR) << 1
}
#[inline]
pub fn set_baddr(&self, addr: u64) {
self.write(TTBR0_EL1::BADDR.val(addr >> 1));

@ -0,0 +1,61 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Table Base Register 1 - EL1
//!
//! Holds the base address of the translation table for the initial lookup for
//! stage 1 of the translation of an address from the upper VA range in the
//! EL1&0 translation regime, and other information for this translation regime.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
TTBR1_EL1 [
/// An ASID for the translation table base address. The TCR_EL1.A1 field
/// selects either TTBR0_EL1.ASID or TTBR1_EL1.ASID.
///
/// If the implementation has only 8 bits of ASID, then the upper 8 bits
/// of this field are RES0.
ASID OFFSET(48) NUMBITS(16) [],
/// Translation table base address (the field stores bits 47:1 of the
/// address).
BADDR OFFSET(1) NUMBITS(47) [],
/// Common not Private
CnP OFFSET(0) NUMBITS(1) []
]
}
/// Zero-sized accessor for the TTBR1_EL1 system register.
pub struct Reg;
impl RegisterReadWrite<u64, TTBR1_EL1::Register> for Reg {
sys_coproc_read_raw!(u64, "TTBR1_EL1");
sys_coproc_write_raw!(u64, "TTBR1_EL1");
}
impl Reg {
/// Returns the translation-table base address; BADDR stores bits 47:1
/// of the address, so the field value is shifted left by 1.
#[inline]
pub fn get_baddr(&self) -> u64 {
self.read(TTBR1_EL1::BADDR) << 1
}
/// Sets the translation-table base address (bits 47:1 go into BADDR).
// NOTE(review): `write` replaces the whole register value, so the ASID
// and CnP fields are cleared — confirm this is intended at call sites.
#[inline]
pub fn set_baddr(&self, addr: u64) {
self.write(TTBR1_EL1::BADDR.val(addr >> 1));
}
}
pub static TTBR1_EL1: Reg = Reg {};

@ -63,12 +63,12 @@ extern {
struct ContextData {
x19to29: [usize; 11],
lr: usize,
ttbr0: usize,
ttbr1: usize,
}
impl ContextData {
fn new(ttbr0: usize) -> Self {
ContextData { lr: __trapret as usize, ttbr0, ..ContextData::default() }
fn new(ttbr1: usize) -> Self {
ContextData { lr: __trapret as usize, ttbr1, ..ContextData::default() }
}
}
@ -98,7 +98,7 @@ impl Context {
stp x25, x26, [x8], #16
stp x27, x28, [x8], #16
stp x29, lr, [x8], #16
mrs x9, ttbr0_el1
mrs x9, ttbr1_el1
str x9, [x8], #8
ldr x8, [x1]
@ -111,7 +111,8 @@ impl Context {
ldr x9, [x8], #8
mov sp, x8
msr ttbr0_el1, x9 // set new page directory
msr ttbr1_el1, x9 // set new page directory
// TODO: with ASID we needn't flush TLB
dsb ishst // ensure write has completed
tlbi vmalle1is // invalidate the TLB entry for the entry that changes
dsb ish // ensure TLB invalidation is complete
@ -126,21 +127,21 @@ impl Context {
Context(0)
}
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, ttbr0: usize) -> Self {
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(ttbr),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top)
}
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, ttbr0: usize) -> Self {
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(ttbr), // TODO: set ASID
tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
}.push_at(kstack_top)
}
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, ttbr0: usize) -> Self {
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(ttbr), // TODO: set ASID
tf: {
let mut tf = tf.clone();
tf.x0 = 0;

@ -38,14 +38,25 @@ pub fn init_mmu_early() {
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI0::Ignored
+ TCR_EL1::IPS.val(ips)
+ TCR_EL1::TG0::KiB_4 // 4 KiB granule
+ TCR_EL1::SH0::Inner
+ TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
+ TCR_EL1::EPD0::EnableTTBR0Walks
+ TCR_EL1::T0SZ.val(16), // Start walks at level 2
TCR_EL1::TBI1::Ignored +
TCR_EL1::TBI0::Ignored +
TCR_EL1::AS::Bits_16 +
TCR_EL1::IPS.val(ips) +
TCR_EL1::TG1::KiB_4 +
TCR_EL1::SH1::Inner +
TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD1::EnableTTBR1Walks +
TCR_EL1::A1::UseTTBR1ASID +
TCR_EL1::T1SZ.val(16) +
TCR_EL1::TG0::KiB_4 +
TCR_EL1::SH0::Inner +
TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD0::EnableTTBR0Walks +
TCR_EL1::T0SZ.val(16),
);
// Switch the MMU on.
@ -63,7 +74,7 @@ pub fn init_mmu_early() {
fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
use consts::{MEMORY_OFFSET};
use consts::MEMORY_OFFSET;
let (start, end) = memory_map().expect("failed to find memory map");
let mut ba = FRAME_ALLOCATOR.lock();
@ -116,7 +127,7 @@ fn remap_the_kernel() {
///
/// This function is expected to return `Some` under all normal cirumstances.
fn memory_map() -> Option<(usize, usize)> {
let binary_end = unsafe { _end as u32 };
let binary_end = _end as u32;
let mut atags: Atags = Atags::get();
while let Some(atag) = atags.next() {

@ -5,7 +5,7 @@ use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
use ucore_memory::memory_set::*;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr0_el1_read, ttbr0_el1_write};
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB, Size2MiB};
@ -122,7 +122,7 @@ pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame)
// }
// }
ttbr0_el1_write(frame_lvl4);
ttbr_el1_write(0, frame_lvl4);
tlb_invalidate_all();
}
@ -291,9 +291,9 @@ impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
// When a new InactivePageTable is created for a user MemorySet, it uses TTBR1 as its
// translation-table base. The kernel's TTBR0 never changes, so map_kernel() is unnecessary.
Self::new_bare()
}
fn new_bare() -> Self {
@ -308,7 +308,7 @@ impl InactivePageTable for InactivePageTable0 {
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&ttbr0_el1_read().0, |active_table, p4_table: &mut Aarch64PageTable| {
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
@ -325,33 +325,34 @@ impl InactivePageTable for InactivePageTable0 {
}
unsafe fn activate(&self) {
let old_frame = ttbr0_el1_read().0;
let old_frame = ttbr_el1_read(0);
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
debug!("switch TTBR0 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr0_el1_write(new_frame);
ttbr_el1_write(0, new_frame);
tlb_invalidate_all();
}
}
unsafe fn with(&self, f: impl FnOnce()) {
let old_frame = ttbr0_el1_read().0;
// Just need to switch the user TTBR
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr0_el1_write(new_frame);
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
f();
debug!("switch table {:?} -> {:?}", new_frame, old_frame);
debug!("switch TTBR1 {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
ttbr0_el1_write(old_frame);
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
}
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
fn alloc_frame() -> Option<usize> {

@ -136,7 +136,7 @@ mod aarch64 {
pub const KERNEL_PML4: usize = 0;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024;
pub const MEMORY_OFFSET: usize = 0;
pub const USER_STACK_OFFSET: usize = 0x3000_0000;
pub const USER_STACK_OFFSET: usize = 0xffff_ffff_0000_0000;
pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024;
pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
}

@ -142,6 +142,10 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
};
#[cfg(target_arch = "aarch64")]
assert_eq!((virt_addr >> 48), 0xffff, "Segment Fault");
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
}
set

Loading…
Cancel
Save