aarch64/mmu: move crate aarch64 to remote

master
equation314 6 years ago
parent a59a7fbe9a
commit 75b039c924

@ -1,16 +0,0 @@
[package]
name = "aarch64"
version = "0.1.0"
authors = ["koumingyang <1761674434@qq.com>"]
[dependencies]
register = "0.2.0"
bit_field = "0.9.0"
bitflags = "1.0.1"
usize_conversions = "0.2.0"
os_bootinfo = "0.2.0"
bare-metal = "0.2.0"
[dependencies.ux]
default-features = false
version = "0.1.0"

@ -1,441 +0,0 @@
use core::convert::{Into, TryInto};
use core::fmt;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use bit_field::BitField;
use usize_conversions::FromUsize;
use ux::*;
/// Which half of the AArch64 virtual address space an address falls in,
/// as selected by the top 16 bits (bottom range is translated via TTBR0_EL1,
/// top range via TTBR1_EL1 — NOTE(review): TTBR association inferred from the
/// usual AArch64 setup, confirm against the MMU configuration code).
#[derive(Debug)]
#[repr(u8)]
pub enum VirtAddrRange {
/// 0x0000000000000000 to 0x0000FFFFFFFFFFFF
BottomRange = 0,
/// 0xFFFF000000000000 to 0xFFFFFFFFFFFFFFFF.
TopRange = 1,
}
/// A canonical 64-bit virtual address; wraps the raw `u64`.
/// `repr(transparent)` guarantees the same layout as `u64`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtAddr(u64);
/// A 64-bit physical address; bits 52..64 must be clear (see `PhysAddr::new`).
/// `repr(transparent)` guarantees the same layout as `u64`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct PhysAddr(u64);
/// Error for a non-canonical virtual address; carries the offending
/// bits 48..64 (see `VirtAddr::try_new`) or, in `va_range`, the whole address.
#[derive(Debug)]
pub struct VirtAddrNotValid(u64);
impl VirtAddr {
/// Creates a new canonical virtual address.
///
/// Panics if the address is not canonical, i.e. if bits 48..64 are neither
/// all zero nor all one. (No sign extension is performed — the address must
/// already be canonical; see `try_new`.)
pub fn new(addr: u64) -> VirtAddr {
Self::try_new(addr).expect(
"invalid virtual address",
)
}
/// Tries to create a new canonical virtual address.
/// in aarch64, valid virtual address starts with 0x0000 or 0xffff.
pub fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
match addr.get_bits(48..64) {
0 | 0xffff => Ok(VirtAddr(addr)), // address is canonical
other => Err(VirtAddrNotValid(other)),
}
}
/// Creates a virtual address without checking canonicality.
///
/// NOTE(review): not marked `unsafe` and performs no validation — a
/// non-canonical value will make later `va_range` calls return `Err`.
pub fn new_unchecked(addr: u64) -> VirtAddr {
VirtAddr(addr)
}
/// Creates a virtual address that points to `0`.
pub const fn zero() -> VirtAddr {
VirtAddr(0)
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Creates a virtual address from the given pointer
pub fn from_ptr<T>(ptr: *const T) -> Self {
use usize_conversions::FromUsize;
Self::new(u64::from_usize(ptr as usize))
}
/// Converts the address to a raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_ptr<T>(self) -> *const T {
use usize_conversions::usize_from;
usize_from(self.as_u64()) as *const T
}
/// Converts the address to a mutable raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_mut_ptr<T>(self) -> *mut T {
self.as_ptr::<T>() as *mut T
}
/// Aligns the virtual address upwards to the given alignment.
///
/// See the `align_up` function for more information.
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_up(self.0, align.into()))
}
/// Aligns the virtual address downwards to the given alignment.
///
/// See the `align_down` function for more information.
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_down(self.0, align.into()))
}
/// Checks whether the virtual address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.align_down(align) == self
}
/// Returns the 12-bit page offset of this virtual address.
pub fn page_offset(&self) -> u12 {
u12::new((self.0 & 0xfff).try_into().unwrap())
}
/// Returns the VA range (bottom/top half of the address space), or an error
/// carrying the full address if the top 16 bits are mixed (non-canonical).
pub fn va_range(&self) -> Result<VirtAddrRange, VirtAddrNotValid> {
match self.va_range_bits() {
0x0000 => Ok(VirtAddrRange::BottomRange),
0xffff => Ok(VirtAddrRange::TopRange),
_ => Err(VirtAddrNotValid(self.0)),
}
}
/// Returns the top 16 bits
pub fn va_range_bits(&self) -> u16 {
((self.0 >> 48) & 0xffff) as u16
}
/// Returns the 9-bit level 1 page table index (address bits 12..21).
pub fn p1_index(&self) -> u9 {
u9::new(((self.0 >> 12) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 2 page table index (address bits 21..30).
pub fn p2_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 3 page table index (address bits 30..39).
pub fn p3_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 4 page table index (address bits 39..48).
pub fn p4_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9 >> 9) & 0o777).try_into().unwrap())
}
}
// Custom Debug so addresses print in hex rather than decimal.
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VirtAddr({:#x})", self.0)
}
}
// Arithmetic on virtual addresses. Addition re-validates through
// `VirtAddr::new`, so stepping out of the canonical ranges panics.
impl Add<u64> for VirtAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for VirtAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
// Subtraction panics on underflow (checked_sub + unwrap) and, like Add,
// panics if the result is non-canonical.
impl Sub<u64> for VirtAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for VirtAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
// Address difference: panics if `rhs > self`.
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all zero.
#[derive(Debug)]
pub struct PhysAddrNotValid(u64);
impl PhysAddr {
/// Creates a new physical address.
///
/// Panics if a bit in the range 52 to 64 is set.
pub fn new(addr: u64) -> PhysAddr {
assert_eq!(
addr.get_bits(52..64),
0,
"physical addresses must not have any bits in the range 52 to 64 set"
);
PhysAddr(addr)
}
/// Tries to create a new physical address.
///
/// Fails if any bits in the range 52 to 64 are set; the error carries the
/// offending high bits, not the whole address.
pub fn try_new(addr: u64) -> Result<PhysAddr, PhysAddrNotValid> {
match addr.get_bits(52..64) {
0 => Ok(PhysAddr(addr)), // address is valid
other => Err(PhysAddrNotValid(other)),
}
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Convenience method for checking if a physical address is null.
pub fn is_null(&self) -> bool {
self.0 == 0
}
/// Aligns the physical address upwards to the given alignment.
///
/// See the `align_up` function for more information.
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_up(self.0, align.into()))
}
/// Aligns the physical address downwards to the given alignment.
///
/// See the `align_down` function for more information.
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_down(self.0, align.into()))
}
/// Checks whether the physical address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.align_down(align) == self
}
}
// Custom Debug so addresses print in hex rather than decimal.
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "PhysAddr({:#x})", self.0)
}
}
// Forward the radix formatting traits to the inner u64.
impl fmt::Binary for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
// Arithmetic on physical addresses. Addition re-validates through
// `PhysAddr::new`, so a carry into bits 52..64 panics.
impl Add<u64> for PhysAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for PhysAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
// Subtraction panics on underflow (checked_sub + unwrap).
impl Sub<u64> for PhysAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for PhysAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
// Address difference: panics if `rhs > self`.
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
/// Align address downwards.
///
/// Returns the greatest `x` with alignment `align` so that `x <= addr`.
///
/// # Panics
///
/// Panics if `align` is not a power of 2.
pub fn align_down(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    // For a power-of-two alignment, clearing the low bits rounds down.
    let low_bits = addr & (align - 1);
    addr - low_bits
}
/// Align address upwards.
///
/// Returns the smallest `x` with alignment `align` so that `x >= addr`. The alignment must be
/// a power of 2.
///
/// # Panics
///
/// - if `align` is not a power of 2;
/// - if the aligned result does not fit in a `u64`. (The previous
///   implementation computed `(addr | align_mask) + 1`, which wraps to 0 in
///   release builds when `addr` is within `align` of `u64::MAX` — a silently
///   wrong answer. We now fail loudly instead.)
pub fn align_up(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    let align_mask = align - 1;
    if addr & align_mask == 0 {
        addr // already aligned
    } else {
        // Setting the low bits then adding one rounds up to the next
        // multiple; checked_add turns the overflow case into a panic.
        (addr | align_mask)
            .checked_add(1)
            .expect("`align_up` overflowed u64")
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Unit tests for `align_up`; `align_down` is exercised indirectly via
// `is_aligned`. Note no test covers the overflow case near u64::MAX with
// an alignment that does not divide the address.
#[test]
pub fn test_align_up() {
// align 1
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(1234, 1), 1234);
assert_eq!(align_up(0xffffffffffffffff, 1), 0xffffffffffffffff);
// align 2
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(1233, 2), 1234);
assert_eq!(align_up(0xfffffffffffffffe, 2), 0xfffffffffffffffe);
// address 0
assert_eq!(align_up(0, 128), 0);
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(0, 0x8000000000000000), 0);
}
}

@ -1,158 +0,0 @@
//! Miscellaneous assembly instructions and functions
use paging::PhysFrame;
use addr::{PhysAddr, VirtAddr};
use regs::*;
/// Returns the current stack pointer (reads the `sp` register).
#[inline(always)]
pub fn sp() -> *const u8 {
let ptr: usize;
unsafe {
asm!("mov $0, sp" : "=r"(ptr));
}
ptr as *const u8
}
/// Returns the current program counter, i.e. the address of the `adr`
/// instruction itself (`adr x, .` materialises the current PC).
#[inline(always)]
pub unsafe fn get_pc() -> usize {
let pc: usize;
asm!("adr $0, ." : "=r"(pc));
pc
}
/// The classic no-op
///
/// The `match ()` dance selects the aarch64 inline-asm arm at compile time;
/// on any other target the function panics via `unimplemented!`.
#[inline]
pub fn nop() {
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe { asm!("nop" :::: "volatile") },
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Wait For Interrupt
///
/// Suspends the core until an interrupt (or other wake-up event) arrives.
#[inline]
pub fn wfi() {
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe { asm!("wfi" :::: "volatile") },
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Wait For Event
///
/// Suspends the core until an event is signalled (e.g. by `sev`).
#[inline]
pub fn wfe() {
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe { asm!("wfe" :::: "volatile") },
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Exception return
///
/// Will jump to wherever the corresponding link register points to, and
/// therefore never return.
#[inline]
pub fn eret() -> ! {
use core;
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe {
asm!("eret" :::: "volatile");
// `eret` never falls through; tell the compiler so the `-> !`
// signature type-checks without a trailing loop.
core::intrinsics::unreachable()
},
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Invalidate all TLB entries.
///
/// `dsb ishst` orders prior page-table stores before the invalidate;
/// `tlbi vmalle1is` broadcasts the invalidation to the Inner Shareable
/// domain; the trailing `dsb ish` / `isb` wait for completion and resync
/// the instruction stream.
#[inline(always)]
pub fn tlb_invalidate_all() {
unsafe {
asm!(
"dsb ishst
tlbi vmalle1is
dsb ish
isb"
);
}
}
/// Invalidate TLB entries that would be used to translate the specified address.
///
/// `tlbi vaae1is` takes the page number, hence the `>> 12` on the address.
#[inline(always)]
pub fn tlb_invalidate(vaddr: VirtAddr) {
unsafe {
asm!(
"dsb ishst
tlbi vaae1is, $0
dsb ish
isb" :: "r"(vaddr.as_u64() >> 12)
);
}
}
/// Invalidate all instruction caches in Inner Shareable domain to Point of Unification.
#[inline(always)]
pub fn flush_icache_all() {
unsafe {
asm!(
"ic ialluis
dsb ish
isb"
);
}
}
/// Address Translate.
///
/// Runs a stage-1 EL1 read translation (`at S1E1R`) for `vaddr` and returns
/// the raw PAR_EL1 value. NOTE(review): the caller receives the whole PAR
/// register (fault bit, attributes and PA field), not a bare physical
/// address — callers must decode it.
#[inline(always)]
pub fn address_translate(vaddr: usize) -> usize {
let paddr: usize;
unsafe {
asm!("at S1E1R, $1; mrs $0, par_el1" : "=r"(paddr) : "r"(vaddr));
}
paddr
}
/// Read TTBRx_EL1 as PhysFrame
///
/// `which` selects TTBR0_EL1 (0) or TTBR1_EL1 (1).
/// NOTE(review): any other value silently yields frame 0 instead of
/// panicking — consider asserting on `which`.
pub fn ttbr_el1_read(which: u8) -> PhysFrame {
let baddr = match which {
0 => TTBR0_EL1.get_baddr(),
1 => TTBR1_EL1.get_baddr(),
_ => 0,
};
PhysFrame::containing_address(PhysAddr::new(baddr))
}
/// Write TTBRx_EL1 from PhysFrame
///
/// Out-of-range `which` values are silently ignored.
pub fn ttbr_el1_write(which: u8, frame: PhysFrame) {
let baddr = frame.start_address().as_u64();
match which {
0 => TTBR0_EL1.set_baddr(baddr),
1 => TTBR1_EL1.set_baddr(baddr),
_ => {}
};
}
/// write TTBRx_EL1 from PhysFrame and ASID
///
/// NOTE(review): the `baddr >> 1` presumably compensates for the BADDR
/// field's bit position in the register definition in `regs` — verify
/// against that definition before changing.
pub fn ttbr_el1_write_asid(which: u8, asid: u16, frame: PhysFrame) {
let baddr = frame.start_address().as_u64();
match which {
0 => TTBR0_EL1.write(TTBR0_EL1::ASID.val(asid as u64) + TTBR0_EL1::BADDR.val(baddr >> 1)),
1 => TTBR1_EL1.write(TTBR1_EL1::ASID.val(asid as u64) + TTBR1_EL1::BADDR.val(baddr >> 1)),
_ => {}
};
}

@ -1,91 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
// Borrow implementations from the pending upstream ACLE implementation until it
// is merged. Afterwards, we'll probably just reexport them, hoping that the API
// doesn't change.
//
// https://github.com/rust-lang-nursery/stdsimd/pull/557
// Private traits so the barrier argument types below form a closed set
// (the "sealed trait" pattern: outside crates cannot implement them).
mod sealed {
pub trait Dmb {
unsafe fn __dmb(&self);
}
pub trait Dsb {
unsafe fn __dsb(&self);
}
pub trait Isb {
unsafe fn __isb(&self);
}
}
// Generates Dmb + Dsb impls for a barrier-domain marker type; the type's
// name is stringified into the instruction's option field (e.g. `DMB ISH`).
macro_rules! dmb_dsb {
($A:ident) => {
impl sealed::Dmb for $A {
#[inline(always)]
unsafe fn __dmb(&self) {
asm!(concat!("DMB ", stringify!($A)) : : : "memory" : "volatile")
}
}
impl sealed::Dsb for $A {
#[inline(always)]
unsafe fn __dsb(&self) {
asm!(concat!("DSB ", stringify!($A)) : : : "memory" : "volatile")
}
}
};
}
// Marker types naming the barrier domains: full system, inner shareable,
// and inner-shareable stores-only.
pub struct SY;
pub struct ISH;
pub struct ISHST;
dmb_dsb!(SY);
dmb_dsb!(ISH);
dmb_dsb!(ISHST);
// ISB only takes the SY option, so it gets a single hand-written impl.
impl sealed::Isb for SY {
#[inline(always)]
unsafe fn __isb(&self) {
asm!("ISB SY" : : : "memory" : "volatile")
}
}
/// Data Memory Barrier; the argument type (`SY`/`ISH`/`ISHST`) selects the
/// barrier domain. Unsafe because barriers are only meaningful around other
/// unsafe memory operations.
#[inline(always)]
pub unsafe fn dmb<A>(arg: A)
where
A: sealed::Dmb,
{
arg.__dmb()
}
/// Data Synchronization Barrier; argument selects the domain as for `dmb`.
#[inline(always)]
pub unsafe fn dsb<A>(arg: A)
where
A: sealed::Dsb,
{
arg.__dsb()
}
/// Instruction Synchronization Barrier (only `SY` implements the bound).
#[inline(always)]
pub unsafe fn isb<A>(arg: A)
where
A: sealed::Isb,
{
arg.__isb()
}

@ -1,29 +0,0 @@
#![no_std]
//#![deny(warnings)]
#![feature(asm)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(try_from)]
//! Low-level support library for AArch64: addresses, paging structures,
//! assembly helpers, barriers and system registers.
extern crate bare_metal;
#[macro_use]
extern crate register;
#[macro_use]
extern crate bitflags;
extern crate bit_field;
extern crate os_bootinfo;
extern crate usize_conversions;
/// Provides the non-standard-width integer types `u2` through `u63`.
///
/// We use these integer types in various APIs, for example `u9` for page tables indices.
pub extern crate ux;
pub use addr::{align_down, align_up, PhysAddr, VirtAddr};
pub mod asm;
pub mod addr;
pub mod paging;
pub mod barrier;
pub mod regs;

@ -1,15 +0,0 @@
//! Traits for abstracting away frame allocation and deallocation.
use paging::{PageSize, PhysFrame};
/// A trait for types that can allocate a frame of memory.
///
/// Generic over the page size so 4KiB, 2MiB and 1GiB frames can have
/// distinct allocators.
pub trait FrameAllocator<S: PageSize> {
/// Allocate a frame of the appropriate size and return it if possible.
/// Returns `None` when no frame is available.
fn alloc(&mut self) -> Option<PhysFrame<S>>;
}
/// A trait for types that can deallocate a frame of memory.
pub trait FrameDeallocator<S: PageSize> {
/// Deallocate the given frame of memory.
fn dealloc(&mut self, frame: PhysFrame<S>);
}

@ -1,64 +0,0 @@
//!Memory region attributes (D4.5, page 2174)
use super::{PageTableAttribute, MEMORY_ATTRIBUTE};
use regs::*;
/// A memory attribute kind, tying together its MAIR index, the attribute
/// encoding written into MAIR_EL1, and the page-table-entry bits selecting it.
pub trait MairType {
/// Index of this attribute within MAIR_EL1 (also written to AttrIndx).
const INDEX: u64;
/// The attribute encoding to program into the MAIR_ATTR slot at `INDEX`.
#[inline]
fn config_value() -> u64;
/// The page-table-entry field value (shareability + AttrIndx) for pages
/// using this attribute.
#[inline]
fn attr_value() -> PageTableAttribute;
}
// Uninhabited marker types: used only at the type level, never instantiated.
pub enum MairDevice {}
pub enum MairNormal {}
pub enum MairNormalNonCacheable {}
impl MairType for MairDevice {
const INDEX: u64 = 0;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Device + MAIR_ATTR::Attr_LOW_DEVICE::Device_nGnRE).value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::OuterShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}
impl MairType for MairNormal {
const INDEX: u64 = 1;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc
+ MAIR_ATTR::Attr_LOW_MEMORY::InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc)
.value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::InnerShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}
impl MairType for MairNormalNonCacheable {
const INDEX: u64 = 2;
#[inline]
fn config_value() -> u64 {
(MAIR_ATTR::Attr_HIGH::Memory_OuterNonCacheable
+ MAIR_ATTR::Attr_LOW_MEMORY::InnerNonCacheable)
.value
}
#[inline]
fn attr_value() -> PageTableAttribute {
MEMORY_ATTRIBUTE::SH::OuterShareable + MEMORY_ATTRIBUTE::AttrIndx.val(Self::INDEX)
}
}

@ -1,549 +0,0 @@
//! Abstractions for page tables and other paging related structures.
//!
//! Page tables translate virtual memory “pages” to physical memory “frames”.
pub use self::frame_alloc::*;
pub use self::page_table::*;
#[cfg(target_arch = "aarch64")]
pub use self::recursive::*;
use core::fmt;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use os_bootinfo;
use ux::*;
use addr::{PhysAddr, VirtAddr};
mod frame_alloc;
mod page_table;
mod recursive;
pub mod memory_attribute;
/// Trait for abstracting over the three possible block/page sizes on aarch64, 4KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
/// The page size in bytes.
const SIZE: u64;
/// A string representation of the page size for debug output.
const SIZE_AS_DEBUG_STR: &'static str;
}
/// This trait is implemented for 4KiB and 2MiB pages, but not for 1GiB pages.
pub trait NotGiantPageSize: PageSize {}
// The three sizes are uninhabited marker enums — purely type-level tags.
/// A standard 4KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
/// A “giant” 1GiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size4KiB {
const SIZE: u64 = 4096;
const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
}
impl NotGiantPageSize for Size4KiB {}
impl PageSize for Size2MiB {
// 512 4KiB pages per 2MiB block (one level-2 table's worth).
const SIZE: u64 = Size4KiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
}
impl NotGiantPageSize for Size2MiB {}
impl PageSize for Size1GiB {
// 512 2MiB blocks per 1GiB block.
const SIZE: u64 = Size2MiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
}
/// A virtual memory page.
///
/// `start_address` is always aligned to `S::SIZE`; the zero-sized
/// `PhantomData` records the page size at the type level.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct Page<S: PageSize = Size4KiB> {
start_address: VirtAddr,
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Returns the page that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
pub fn from_start_address(address: VirtAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(Page::containing_address(address))
}
/// Returns the page that contains the given virtual address.
pub fn containing_address(address: VirtAddr) -> Self {
Page {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the page.
pub fn start_address(&self) -> VirtAddr {
self.start_address
}
/// Returns the size the page (4KB, 2MB or 1GB).
pub const fn size(&self) -> u64 {
S::SIZE
}
/// Returns the top 16 bits (VA-range selector) of this page's start address.
pub fn va_range_bits(&self) -> u16 {
self.start_address().va_range_bits()
}
/// Returns the level 4 page table index of this page.
pub fn p4_index(&self) -> u9 {
self.start_address().p4_index()
}
/// Returns the level 3 page table index of this page.
pub fn p3_index(&self) -> u9 {
self.start_address().p3_index()
}
/// Returns a range of pages, exclusive `end`.
pub fn range(start: Self, end: Self) -> PageRange<S> {
PageRange { start, end }
}
/// Returns a range of pages, inclusive `end`.
pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
/// Convenience: the page containing the given raw `usize` address.
pub fn of_addr(address: usize) -> Self {
Self::containing_address(VirtAddr::new(address as u64))
}
/// Pages covering `[begin, end)`; the `end - 1` makes the range cover the
/// page containing the last byte. NOTE(review): underflows if `end == 0`,
/// and assumes `end > begin`.
pub fn range_of(begin: usize, end: usize) -> PageRange<S> {
Self::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
impl<S: NotGiantPageSize> Page<S> {
/// Returns the level 2 page table index of this page.
/// (1GiB pages are excluded: they are mapped at level 3, so a level-2
/// index is meaningless for them.)
pub fn p2_index(&self) -> u9 {
self.start_address().p2_index()
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
///
/// The indices are placed at their bit positions (p4: 39..48, p3: 30..39);
/// all lower bits are zero.
pub fn from_page_table_indices_1gib(p4_index: u9, p3_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
pub fn from_page_table_indices_2mib(p4_index: u9, p3_index: u9, p2_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
addr.set_bits(21..30, u64::from(p2_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
pub fn from_page_table_indices(p4_index: u9, p3_index: u9, p2_index: u9, p1_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
addr.set_bits(21..30, u64::from(p2_index));
addr.set_bits(12..21, u64::from(p1_index));
Page::containing_address(VirtAddr::new(addr))
}
/// Returns the level 1 page table index of this page.
pub fn p1_index(&self) -> u9 {
self.start_address().p1_index()
}
}
// Debug prints the size tag and the start address in hex, e.g. "Page[4KiB](0x1000)".
impl<S: PageSize> fmt::Debug for Page<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"Page[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
// Page arithmetic works in units of pages, not bytes: `page + n` advances
// by n * S::SIZE bytes.
impl<S: PageSize> Add<u64> for Page<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for Page<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for Page<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for Page<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
// Difference of two pages is the number of pages between them (panics if
// rhs > self, via VirtAddr subtraction).
impl<S: PageSize> Sub<Self> for Page<S> {
type Output = u64;
fn sub(self, rhs: Self) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of pages with exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, exclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRange<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
// Iterating advances `start` toward `end`, mutating the range in place.
impl<S: PageSize> Iterator for PageRange<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
/// (Both bounds are size-aligned, so `containing_address` is exact.)
pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
end: Page::containing_address(self.end.start_address()),
}
}
}
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of pages with inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, inclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRangeInclusive<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
// NOTE(review): after yielding the last page the iterator still computes
// `start + 1`; if `end` is the final page of the address space that addition
// panics in `VirtAddr::new`. Fine for interior ranges.
impl<S: PageSize> Iterator for PageRangeInclusive<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A physical memory frame.
///
/// Mirror of `Page` on the physical side: `start_address` is always aligned
/// to `S::SIZE` and the size is tracked only at the type level.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct PhysFrame<S: PageSize = Size4KiB> {
start_address: PhysAddr,
size: PhantomData<S>,
}
impl<S: PageSize> PhysFrame<S> {
/// Returns the frame that starts at the given physical address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
pub fn from_start_address(address: PhysAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(PhysFrame::containing_address(address))
}
/// Returns the frame that contains the given physical address.
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the frame.
pub fn start_address(&self) -> PhysAddr {
self.start_address
}
/// Returns the size the frame (4KB, 2MB or 1GB).
pub fn size(&self) -> u64 {
S::SIZE
}
/// Returns a range of frames, exclusive `end`.
pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}
/// Returns a range of frames, inclusive `end`.
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
/// Convenience: the frame containing the given raw `usize` address.
pub fn of_addr(address: usize) -> Self {
Self::containing_address(PhysAddr::new(address as u64))
}
/// Frames covering `[begin, end)`. NOTE(review): underflows if `end == 0`,
/// and assumes `end > begin`.
pub fn range_of(begin: usize, end: usize) -> PhysFrameRange<S> {
Self::range(Self::of_addr(begin), Self::of_addr(end - 1) + 1)
}
}
// Debug prints the size tag and start address in hex, e.g. "PhysFrame[4KiB](0x1000)".
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"PhysFrame[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
// Frame arithmetic works in units of frames: `frame + n` advances by
// n * S::SIZE bytes.
impl<S: PageSize> Add<u64> for PhysFrame<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for PhysFrame<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
// Difference of two frames is the number of frames between them (panics if
// rhs > self, via PhysAddr subtraction).
impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
type Output = u64;
fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// An range of physical memory frames, exclusive the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
// Iterating advances `start` toward `end`, mutating the range in place.
impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
// Conversions to/from the bootloader's frame-range type. The From impl
// panics (unwrap) if the bootinfo addresses are not 4KiB-aligned.
impl From<os_bootinfo::FrameRange> for PhysFrameRange {
fn from(range: os_bootinfo::FrameRange) -> Self {
PhysFrameRange {
start: PhysFrame::from_start_address(PhysAddr::new(range.start_addr())).unwrap(),
end: PhysFrame::from_start_address(PhysAddr::new(range.end_addr())).unwrap(),
}
}
}
impl Into<os_bootinfo::FrameRange> for PhysFrameRange {
fn into(self) -> os_bootinfo::FrameRange {
os_bootinfo::FrameRange::new(
self.start.start_address().as_u64(),
self.end.start_address().as_u64(),
)
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// An range of physical memory frames, inclusive the upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
// NOTE(review): like PageRangeInclusive, computing `start + 1` after the
// final frame panics if `end` is the last representable frame.
impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
// Walks an exclusive and an inclusive 1000-page range and checks each
// yielded page. Note 0xdeadbeaf is deliberately NOT page-aligned:
// containing_address must round it down consistently on both sides.
#[test]
pub fn test_page_ranges() {
let page_size = Size4KiB::SIZE;
let number = 1000;
let start_addr = VirtAddr::new(0xdeadbeaf);
let start: Page = Page::containing_address(start_addr);
let end = start.clone() + number;
let mut range = Page::range(start.clone(), end.clone());
for i in 0..number {
assert_eq!(
range.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range.next(), None);
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
}

@ -1,267 +0,0 @@
use core::fmt;
use core::ops::{Index, IndexMut};
use super::{PhysFrame, PageSize};
use addr::PhysAddr;
use usize_conversions::usize_from;
use ux::*;
use register::FieldValue;
use register::cpu::RegisterReadWrite;
/// Memory attribute fields mask
const MEMORY_ATTR_MASK: u64 = (MEMORY_ATTRIBUTE::SH.mask << MEMORY_ATTRIBUTE::SH.shift)
| (MEMORY_ATTRIBUTE::AttrIndx.mask << MEMORY_ATTRIBUTE::AttrIndx.shift);
/// Output address mask (bits 12..48 of the descriptor).
const ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
/// Other flags mask: every descriptor bit that is neither address nor
/// memory attribute.
const FLAGS_MASK: u64 = !(MEMORY_ATTR_MASK | ADDR_MASK);
/// Memory attribute fields
pub type PageTableAttribute = FieldValue<u64, MEMORY_ATTRIBUTE::Register>;
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}
/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
pub entry: u64,
}
// Lets the register crate's field/value machinery operate directly on an
// in-memory descriptor.
impl RegisterReadWrite<u64, MEMORY_ATTRIBUTE::Register> for PageTableEntry {
#[inline]
fn get(&self) -> u64 {
self.entry
}
// NOTE(review): writing through a `*const -> *mut` cast of a shared
// reference is undefined behavior by Rust's aliasing rules; the trait's
// `&self` signature forces it, but this should use interior mutability
// (e.g. `Cell<u64>`) or volatile accesses instead.
#[inline]
fn set(&self, value: u64) {
unsafe { *(&self.entry as *const u64 as *mut u64) = value }
}
}
impl PageTableEntry {
/// Returns whether this entry is zero.
#[inline]
pub fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
#[inline]
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
#[inline]
pub fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.entry)
}
/// Returns the physical address mapped by this entry, might be zero.
#[inline]
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & ADDR_MASK)
}
/// Returns the memory attribute fields of this entry.
#[inline]
pub fn attr(&self) -> PageTableAttribute {
PageTableAttribute::new(MEMORY_ATTR_MASK, 0, self.entry & MEMORY_ATTR_MASK)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `VALID` flag set.
/// - `FrameError::HugeFrame` if the entry doesn't have the `TABLE_OR_PAGE` flag set, i.e.
///   is a block (huge-page) descriptor (for those the `addr` function must be used).
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().contains(PageTableFlags::VALID) {
Err(FrameError::FrameNotPresent)
} else if !self.flags().contains(PageTableFlags::TABLE_OR_PAGE) {
// is a huge page (block)
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical frame with the specified flags and memory attribute.
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags, attr: PageTableAttribute) {
// is not a block
assert!(flags.contains(PageTableFlags::TABLE_OR_PAGE));
self.set(frame.start_address().as_u64() | flags.bits() | attr.value);
}
/// The descriptor gives the base address of a block of memory, and the attributes for that
/// memory region.
pub fn set_block<S: PageSize>(&mut self, addr: PhysAddr, flags: PageTableFlags, attr: PageTableAttribute) {
// is a block
assert!(!flags.contains(PageTableFlags::TABLE_OR_PAGE));
self.set(addr.align_down(S::SIZE).as_u64() | flags.bits() | attr.value);
}
/// Map the entry to the specified physical address with the specified flags.
pub fn modify_addr(&mut self, addr: PhysAddr) {
self.entry = (self.entry & !ADDR_MASK) | addr.as_u64();
}
/// Sets the flags of this entry.
pub fn modify_flags(&mut self, flags: PageTableFlags) {
self.entry = (self.entry & !FLAGS_MASK) | flags.bits();
}
/// Sets the flags of this entry.
pub fn modify_attr(&mut self, attr: PageTableAttribute) {
self.entry = (self.entry & !MEMORY_ATTR_MASK) | attr.value;
}
}
impl fmt::Debug for PageTableEntry {
    /// Formats the entry as a struct showing the raw value, the decoded
    /// output address, the flag bits and the memory-attribute bits.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PageTableEntry")
            .field("value", &self.entry)
            .field("addr", &self.addr())
            .field("flags", &self.flags())
            .field("attr", &self.attr().value)
            .finish()
    }
}
// Layout of the memory-attribute fields common to all VMSAv8-64 descriptors;
// used by `PageTableEntry::attr` / `modify_attr` via `PageTableAttribute`.
register_bitfields! {u64,
    // Memory attribute fields in the VMSAv8-64 translation table format descriptors (Page 2148~2152)
    MEMORY_ATTRIBUTE [
        /// Shareability field
        SH       OFFSET(8) NUMBITS(2) [
            NonShareable = 0b00,
            OuterShareable = 0b10,
            InnerShareable = 0b11
        ],
        /// Memory attributes index into the MAIR_EL1 register
        AttrIndx OFFSET(2) NUMBITS(3) []
    ]
}
bitflags! {
    /// Possible flags for a page table entry.
    ///
    /// Bits 51 and 55..58 double as software-defined bits; note that `WRITE`
    /// deliberately aliases the hardware `DBM` bit (both are bit 51).
    pub struct PageTableFlags: u64 {
        /// identifies whether the descriptor is valid
        const VALID =           1 << 0;
        /// the descriptor type
        /// 0, Block
        /// 1, Table/Page
        const TABLE_OR_PAGE =   1 << 1;
        /// Access permission: accessable at EL0
        const AP_EL0 =          1 << 6;
        /// Access permission: read-only
        const AP_RO =           1 << 7;
        /// Access flag
        const AF =              1 << 10;
        /// not global bit
        const nG =              1 << 11;
        /// Dirty Bit Modifier
        const DBM =             1 << 51;
        /// A hint bit indicating that the translation table entry is one of a contiguous set of
        /// entries
        const Contiguous =      1 << 52;
        /// Privileged Execute-never
        const PXN =             1 << 53;
        /// Execute-never/Unprivileged execute-never
        const XN =              1 << 54;

        /// Software Dirty Bit Modifier (same bit as the hardware `DBM`)
        const WRITE =           1 << 51;
        /// Software dirty bit
        const DIRTY =           1 << 55;
        /// Software swapped bit
        const SWAPPED =         1 << 56;
        /// Software writable shared bit for COW
        const WRITABLE_SHARED = 1 << 57;
        /// Software readonly shared bit for COW
        const READONLY_SHARED = 1 << 58;

        /// Privileged Execute-never for table descriptors
        const PXNTable =        1 << 59;
        /// Execute-never/Unprivileged execute-never for table descriptors
        const XNTable =         1 << 60;
    }
}
impl Default for PageTableFlags {
#[inline]
fn default() -> Self {
Self::VALID | Self::TABLE_OR_PAGE | Self::AF | Self::WRITE | Self::PXN | Self::XN
}
}
/// The number of entries in a page table (4 KiB table / 8-byte descriptors = 512).
const ENTRY_COUNT: usize = 512;
/// Represents a page table.
///
/// Always page-sized (512 entries of 8 bytes each).
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 15th page table entry.
#[repr(transparent)]
pub struct PageTable {
    entries: [PageTableEntry; ENTRY_COUNT],
}
impl PageTable {
    /// Clears all entries, marking the whole table as unused.
    pub fn zero(&mut self) {
        self.entries.iter_mut().for_each(|entry| entry.set_unused());
    }
}
// Indexing with a plain `usize`; panics if `index >= ENTRY_COUNT`.
impl Index<usize> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}
// Indexing with a 9-bit index type; the type guarantees the index is in range.
impl Index<u9> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: u9) -> &Self::Output {
        &self.entries[usize_from(u16::from(index))]
    }
}

impl IndexMut<u9> for PageTable {
    fn index_mut(&mut self, index: u9) -> &mut Self::Output {
        &mut self.entries[usize_from(u16::from(index))]
    }
}
impl fmt::Debug for PageTable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.entries[..].fmt(f)
}
}

@ -1,425 +0,0 @@
#![cfg(target_arch = "aarch64")]
use paging::{
frame_alloc::FrameAllocator,
page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
NotGiantPageSize, Page, PageSize, PhysFrame, Size4KiB,
};
use paging::{page_table::PageTableFlags as Flags, PageTableAttribute, memory_attribute::*};
use asm::{ttbr_el1_read, tlb_invalidate};
use barrier;
use ux::u9;
use addr::{PhysAddr, VirtAddr};
/// This type represents a page whose mapping has changed in the page table.
///
/// The old mapping might be still cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from function that
/// change the mapping of a page to ensure that the TLB flush is not forgotten.
#[derive(Debug)]
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush<S: PageSize>(Page<S>);

impl<S: PageSize> MapperFlush<S> {
    /// Create a new flush promise
    fn new(page: Page<S>) -> Self {
        MapperFlush(page)
    }

    /// Flush the page from the TLB to ensure that the newest mapping is used.
    pub fn flush(self) {
        tlb_invalidate(self.0.start_address());
    }

    /// Don't flush the TLB and silence the “must be used” warning.
    pub fn ignore(self) {}
}
/// A trait for common page table operations.
pub trait Mapper<S: PageSize> {
    /// Creates a new mapping in the page table with the given flags and
    /// memory-attribute fields.
    ///
    /// This function might need additional physical frames to create new page tables. These
    /// frames are allocated from the `allocator` argument. At most three frames are required.
    fn map_to<A>(
        &mut self,
        page: Page<S>,
        frame: PhysFrame<S>,
        flags: PageTableFlags,
        attr: PageTableAttribute,
        allocator: &mut A,
    ) -> Result<MapperFlush<S>, MapToError>
    where
        A: FrameAllocator<Size4KiB>;

    /// Removes a mapping from the page table and returns the frame that used to be mapped.
    ///
    /// Note that no page tables or pages are deallocated.
    fn unmap(&mut self, page: Page<S>) -> Result<(PhysFrame<S>, MapperFlush<S>), UnmapError>;

    /// Updates the flags of an existing mapping.
    fn update_flags(
        &mut self,
        page: Page<S>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<S>, FlagUpdateError>;

    /// Return the frame that the specified page is mapped to.
    fn translate_page(&self, page: Page<S>) -> Option<PhysFrame<S>>;

    /// Maps the given frame to the virtual page with the same address.
    /// Provided in terms of `map_to`.
    fn identity_map<A>(
        &mut self,
        frame: PhysFrame<S>,
        flags: PageTableFlags,
        attr: PageTableAttribute,
        allocator: &mut A,
    ) -> Result<MapperFlush<S>, MapToError>
    where
        A: FrameAllocator<Size4KiB>,
        S: PageSize,
        Self: Mapper<S>,
    {
        let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
        self.map_to(page, frame, flags, attr, allocator)
    }
}
/// A recursive page table is a last level page table with an entry mapped to the table itself.
///
/// This recursive mapping allows accessing all page tables in the hierarchy:
///
/// - To access the level 4 page table, we “loop“ (i.e. follow the recursively mapped entry) four
///   times.
/// - To access a level 3 page table, we “loop” three times and then use the level 4 index.
/// - To access a level 2 page table, we “loop” two times, then use the level 4 index, then the
///   level 3 index.
/// - To access a level 1 page table, we “loop” once, then use the level 4 index, then the
///   level 3 index, then the level 2 index.
///
/// This struct implements the `Mapper` trait.
#[derive(Debug)]
pub struct RecursivePageTable<'a> {
    // The root (level 4) table, accessed through the recursive mapping.
    p4: &'a mut PageTable,
    // Index of the entry in `p4` that points back at `p4` itself.
    recursive_index: u9,
}
/// An error indicating that the given page table is not recursively mapped.
///
/// Returned from `RecursivePageTable::new`.
#[derive(Debug)]
pub struct NotRecursivelyMapped;

/// This error is returned from `map_to` and similar methods.
#[derive(Debug)]
pub enum MapToError {
    /// An additional frame was needed for the mapping process, but the frame allocator
    /// returned `None`.
    FrameAllocationFailed,
    /// An upper level page table entry is a block descriptor, which means that the
    /// given page is part of an already mapped huge page.
    ParentEntryHugePage,
    /// The given page is already mapped to a physical frame.
    PageAlreadyMapped,
}
/// An error indicating that an `unmap` call failed.
#[derive(Debug)]
pub enum UnmapError {
    /// An upper level page table entry is a block descriptor, which means that the
    /// given page is part of a huge page and can't be freed individually.
    ParentEntryHugePage,
    /// The given page is not mapped to a physical frame.
    PageNotMapped,
    /// The page table entry for the given page points to an invalid physical address.
    InvalidFrameAddress(PhysAddr),
}

/// An error indicating that an `update_flags` call failed.
#[derive(Debug)]
pub enum FlagUpdateError {
    /// The given page is not mapped to a physical frame.
    PageNotMapped,
}
impl<'a> RecursivePageTable<'a> {
    /// Creates a new RecursivePageTable from the passed level 4 PageTable.
    ///
    /// The page table must be recursively mapped, that means:
    ///
    /// - The page table must have one recursive entry, i.e. an entry that points to the table
    ///   itself.
    /// - The reference must use that “loop”, i.e. be of the form `0o_xxx_xxx_xxx_xxx_0000`
    ///   where `xxx` is the recursive entry.
    /// - The page table must be active, i.e. the corresponding `TTBRn_EL1` register must
    ///   contain its physical address.
    ///
    /// Otherwise `Err(NotRecursivelyMapped)` is returned.
    pub fn new(table: &'a mut PageTable) -> Result<Self, NotRecursivelyMapped> {
        let page = Page::containing_address(VirtAddr::new(table as *const _ as u64));
        let recursive_index = page.p4_index();

        // A recursively mapped reference repeats the recursive index at every level.
        if page.p3_index() != recursive_index
            || page.p2_index() != recursive_index
            || page.p1_index() != recursive_index
        {
            return Err(NotRecursivelyMapped);
        }
        // The active TTBR for this VA range must point at the recursive entry's frame.
        if Ok(ttbr_el1_read(page.start_address().va_range().unwrap() as u8)) !=
            table[recursive_index].frame()
        {
            return Err(NotRecursivelyMapped);
        }

        Ok(RecursivePageTable {
            p4: table,
            recursive_index,
        })
    }

    /// Creates a new RecursivePageTable without performing any checks.
    ///
    /// The `recursive_index` parameter must be the index of the recursively mapped entry.
    pub unsafe fn new_unchecked(table: &'a mut PageTable, recursive_index: u9) -> Self {
        RecursivePageTable {
            p4: table,
            recursive_index,
        }
    }

    /// Internal helper function to create the page table of the next level if needed.
    ///
    /// If the passed entry is unused, a new frame is allocated from the given allocator, zeroed,
    /// and the entry is updated to that address. If the passed entry is already mapped, the next
    /// table is returned directly.
    ///
    /// The `next_page_table` page must be the page of the next page table in the hierarchy.
    ///
    /// Returns `MapToError::FrameAllocationFailed` if the entry is unused and the allocator
    /// returned `None`. Returns `MapToError::ParentEntryHugePage` if the passed entry is a
    /// block descriptor (i.e. its `TABLE_OR_PAGE` flag is clear).
    unsafe fn create_next_table<'b, A>(
        entry: &'b mut PageTableEntry,
        next_table_page: Page,
        allocator: &mut A,
    ) -> Result<&'b mut PageTable, MapToError>
    where
        A: FrameAllocator<Size4KiB>,
    {
        /// This inner function is used to limit the scope of `unsafe`.
        ///
        /// This is a safe function, so we need to use `unsafe` blocks when we do something unsafe.
        fn inner<'b, A>(
            entry: &'b mut PageTableEntry,
            next_table_page: Page,
            allocator: &mut A,
        ) -> Result<&'b mut PageTable, MapToError>
        where
            A: FrameAllocator<Size4KiB>,
        {
            let created;

            if entry.is_unused() {
                if let Some(frame) = allocator.alloc() {
                    entry.set_frame(frame, Flags::default(), MairNormal::attr_value());
                    created = true;
                } else {
                    return Err(MapToError::FrameAllocationFailed);
                }
            } else {
                created = false;
            }

            // is a huge page (block)
            if !entry.flags().contains(Flags::TABLE_OR_PAGE) {
                return Err(MapToError::ParentEntryHugePage);
            }

            let page_table_ptr = next_table_page.start_address().as_mut_ptr();
            let page_table: &mut PageTable = unsafe { &mut *(page_table_ptr) };
            if created {
                // Ensure the new table entry is visible before zeroing through the
                // recursive mapping.
                unsafe { barrier::dsb(barrier::ISHST); }
                page_table.zero();
            }
            Ok(page_table)
        }

        inner(entry, next_table_page, allocator)
    }

    /// Pointer to the level 3 table for `page`, via the recursive mapping.
    pub fn p3_ptr<S: PageSize>(&self, page: Page<S>) -> *mut PageTable {
        self.p3_page(page).start_address().as_mut_ptr()
    }

    /// Pointer to the level 2 table for `page`, via the recursive mapping.
    pub fn p2_ptr<S: NotGiantPageSize>(&self, page: Page<S>) -> *mut PageTable {
        self.p2_page(page).start_address().as_mut_ptr()
    }

    /// Pointer to the level 1 table for `page`, via the recursive mapping.
    pub fn p1_ptr(&self, page: Page<Size4KiB>) -> *mut PageTable {
        self.p1_page(page).start_address().as_mut_ptr()
    }

    // "Loop" three times: the virtual page whose frame is the level 3 table.
    fn p3_page<S: PageSize>(&self, page: Page<S>) -> Page {
        Page::from_page_table_indices(
            self.recursive_index,
            self.recursive_index,
            self.recursive_index,
            page.p4_index(),
        )
    }

    // "Loop" twice: the virtual page whose frame is the level 2 table.
    fn p2_page<S: NotGiantPageSize>(&self, page: Page<S>) -> Page {
        Page::from_page_table_indices(
            self.recursive_index,
            self.recursive_index,
            page.p4_index(),
            page.p3_index(),
        )
    }

    // "Loop" once: the virtual page whose frame is the level 1 table.
    fn p1_page(&self, page: Page<Size4KiB>) -> Page {
        Page::from_page_table_indices(
            self.recursive_index,
            page.p4_index(),
            page.p3_index(),
            page.p2_index(),
        )
    }
}
impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
    /// Walks (and creates, if necessary) the table hierarchy and installs the
    /// mapping for `page` in the level 1 table.
    fn map_to<A>(
        &mut self,
        page: Page<Size4KiB>,
        frame: PhysFrame<Size4KiB>,
        flags: PageTableFlags,
        attr: PageTableAttribute,
        allocator: &mut A,
    ) -> Result<MapperFlush<Size4KiB>, MapToError>
    where
        A: FrameAllocator<Size4KiB>,
    {
        // NOTE(review): re-borrowing `self` through a raw pointer sidesteps the
        // borrow checker so `p4` and `self` can be used simultaneously; this
        // aliases `&mut self` and should be restructured -- confirm soundness.
        let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
        let p4 = &mut self_mut.p4;

        let p3_page = self.p3_page(page);
        let p3 = unsafe { Self::create_next_table(&mut p4[page.p4_index()], p3_page, allocator)? };

        let p2_page = self.p2_page(page);
        let p2 = unsafe { Self::create_next_table(&mut p3[page.p3_index()], p2_page, allocator)? };

        let p1_page = self.p1_page(page);
        let p1 = unsafe { Self::create_next_table(&mut p2[page.p2_index()], p1_page, allocator)? };

        if !p1[page.p1_index()].is_unused() {
            return Err(MapToError::PageAlreadyMapped);
        }
        p1[page.p1_index()].set_frame(frame, flags, attr);

        Ok(MapperFlush::new(page))
    }

    /// Walks the hierarchy, clears the level 1 entry for `page` and returns
    /// the previously mapped frame. Fails on unmapped pages or block entries.
    fn unmap(
        &mut self,
        page: Page<Size4KiB>,
    ) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
        // NOTE(review): same aliasing raw-pointer re-borrow as in `map_to`.
        let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
        let p4 = &mut self_mut.p4;
        let p4_entry = &p4[page.p4_index()];
        p4_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
        })?;

        let p3 = unsafe { &mut *(self.p3_ptr(page)) };
        let p3_entry = &p3[page.p3_index()];
        p3_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
        })?;

        let p2 = unsafe { &mut *(self.p2_ptr(page)) };
        let p2_entry = &p2[page.p2_index()];
        p2_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
        })?;

        let p1 = unsafe { &mut *(self.p1_ptr(page)) };
        let p1_entry = &mut p1[page.p1_index()];

        let frame = p1_entry.frame().map_err(|err| match err {
            FrameError::FrameNotPresent => UnmapError::PageNotMapped,
            FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
        })?;

        p1_entry.set_unused();
        Ok((frame, MapperFlush::new(page)))
    }

    /// Rewrites the flag bits of an existing level 1 mapping; the output
    /// address and memory attributes are preserved.
    fn update_flags(
        &mut self,
        page: Page<Size4KiB>,
        flags: PageTableFlags,
    ) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
        // NOTE(review): same aliasing raw-pointer re-borrow as in `map_to`.
        let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
        let p4 = &mut self_mut.p4;

        if p4[page.p4_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }

        let p3 = unsafe { &mut *(self.p3_ptr(page)) };

        if p3[page.p3_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }

        let p2 = unsafe { &mut *(self.p2_ptr(page)) };

        if p2[page.p2_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }

        let p1 = unsafe { &mut *(self.p1_ptr(page)) };

        if p1[page.p1_index()].is_unused() {
            return Err(FlagUpdateError::PageNotMapped);
        }

        p1[page.p1_index()].modify_flags(flags);

        Ok(MapperFlush::new(page))
    }

    /// Walks the hierarchy read-only and returns the frame `page` maps to,
    /// or `None` if any level is unused.
    fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
        // NOTE(review): obtains `&mut` from `&self` via a raw-pointer cast;
        // this is UB if any mutation occurs through it -- confirm / refactor.
        let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
        let p4 = &mut self_mut.p4;

        if p4[page.p4_index()].is_unused() {
            return None;
        }

        let p3 = unsafe { &*(self.p3_ptr(page)) };
        let p3_entry = &p3[page.p3_index()];

        if p3_entry.is_unused() {
            return None;
        }

        let p2 = unsafe { &*(self.p2_ptr(page)) };
        let p2_entry = &p2[page.p2_index()];

        if p2_entry.is_unused() {
            return None;
        }

        let p1 = unsafe { &*(self.p1_ptr(page)) };
        let p1_entry = &p1[page.p1_index()];

        if p1_entry.is_unused() {
            return None;
        }

        PhysFrame::from_start_address(p1_entry.addr()).ok()
    }
}

@ -1,31 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Frequency register - EL0
//!
//! This register is provided so that software can discover the frequency of the
//! system counter. It must be programmed with this value as part of system
//! initialization. The value of the register is not interpreted by hardware.
use register::cpu::RegisterReadOnly;
/// Zero-sized accessor for the `CNTFRQ_EL0` system register.
pub struct Reg;

impl RegisterReadOnly<u32, ()> for Reg {
    sys_coproc_read_raw!(u32, "CNTFRQ_EL0");
}

/// Read-only handle through which `CNTFRQ_EL0` is accessed.
pub static CNTFRQ_EL0: Reg = Reg {};

@ -1,75 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Hypervisor Control register - EL2
//!
//! Controls the generation of an event stream from the physical counter, and
//! access from Non-secure EL1 to the physical counter and the Non-secure EL1
//! physical timer.
use register::cpu::RegisterReadWrite;
// When HCR_EL2.E2H == 0:
// TODO: Figure out how we can differentiate depending on HCR_EL2.E2H state
//
// For now, implement the HCR_EL2.E2H == 0 version
register_bitfields! {u32,
    CNTHCTL_EL2 [
        /// Traps Non-secure EL0 and EL1 accesses to the physical timer
        /// registers to EL2.
        ///
        /// 0 From AArch64 state: Non-secure EL0 and EL1 accesses to the
        ///   CNTP_CTL_EL0, CNTP_CVAL_EL0, and CNTP_TVAL_EL0 are trapped to EL2,
        ///   unless it is trapped by CNTKCTL_EL1.EL0PTEN.
        ///
        ///   From AArch32 state: Non-secure EL0 and EL1 accesses to the
        ///   CNTP_CTL, CNTP_CVAL, and CNTP_TVAL are trapped to EL2, unless it
        ///   is trapped by CNTKCTL_EL1.EL0PTEN or CNTKCTL.PL0PTEN.
        ///
        /// 1 This control does not cause any instructions to be trapped.
        ///
        /// If EL3 is implemented and EL2 is not implemented, behavior is as if
        /// this bit is 1 other than for the purpose of a direct read.
        EL1PCEN  OFFSET(1) NUMBITS(1) [],

        /// Traps Non-secure EL0 and EL1 accesses to the physical counter
        /// register to EL2.
        ///
        /// 0 From AArch64 state: Non-secure EL0 and EL1 accesses to the
        ///   CNTPCT_EL0 are trapped to EL2, unless it is trapped by
        ///   CNTKCTL_EL1.EL0PCTEN.
        ///
        ///   From AArch32 state: Non-secure EL0 and EL1 accesses to the CNTPCT
        ///   are trapped to EL2, unless it is trapped by CNTKCTL_EL1.EL0PCTEN
        ///   or CNTKCTL.PL0PCTEN.
        ///
        /// 1 This control does not cause any instructions to be trapped.
        ///
        /// If EL3 is implemented and EL2 is not implemented, behavior is as if
        /// this bit is 1 other than for the purpose of a direct read.
        EL1PCTEN OFFSET(0) NUMBITS(1) []
    ]
}

/// Zero-sized accessor for the `CNTHCTL_EL2` system register.
pub struct Reg;

impl RegisterReadWrite<u32, CNTHCTL_EL2::Register> for Reg {
    sys_coproc_read_raw!(u32, "CNTHCTL_EL2");
    sys_coproc_write_raw!(u32, "CNTHCTL_EL2");
}

/// Read/write handle through which `CNTHCTL_EL2` is accessed.
#[allow(non_upper_case_globals)]
pub static CNTHCTL_EL2: Reg = Reg {};

@ -1,62 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Timer Control register - EL0
//!
//! Control register for the EL1 physical timer.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
    CNTP_CTL_EL0 [
        /// The status of the timer. This bit indicates whether the timer
        /// condition is met:
        ///
        /// 0 Timer condition is not met.
        /// 1 Timer condition is met.
        ///
        /// When the value of the ENABLE bit is 1, ISTATUS indicates whether the
        /// timer condition is met. ISTATUS takes no account of the value of the
        /// IMASK bit. If the value of ISTATUS is 1 and the value of IMASK is 0
        /// then the timer interrupt is asserted.
        ///
        /// When the value of the ENABLE bit is 0, the ISTATUS field is UNKNOWN.
        ///
        /// This bit is read-only.
        ISTATUS OFFSET(2) NUMBITS(1) [],

        /// Timer interrupt mask bit. Permitted values are:
        ///
        /// 0 Timer interrupt is not masked by the IMASK bit.
        /// 1 Timer interrupt is masked by the IMASK bit.
        IMASK   OFFSET(1) NUMBITS(1) [],

        /// Enables the timer. Permitted values are:
        ///
        /// 0 Timer disabled.
        /// 1 Timer enabled.
        ENABLE  OFFSET(0) NUMBITS(1) []
    ]
}

/// Zero-sized accessor for the `CNTP_CTL_EL0` system register.
pub struct Reg;

impl RegisterReadWrite<u32, CNTP_CTL_EL0::Register> for Reg {
    sys_coproc_read_raw!(u32, "CNTP_CTL_EL0");
    sys_coproc_write_raw!(u32, "CNTP_CTL_EL0");
}

/// Read/write handle through which `CNTP_CTL_EL0` is accessed.
pub static CNTP_CTL_EL0: Reg = Reg {};

@ -1,30 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Timer TimerValue register - EL0
//!
//! Holds the timer value for the EL1 physical timer.
use register::cpu::RegisterReadWrite;
/// Zero-sized accessor for the `CNTP_TVAL_EL0` system register.
pub struct Reg;

impl RegisterReadWrite<u32, ()> for Reg {
    sys_coproc_read_raw!(u32, "CNTP_TVAL_EL0");
    sys_coproc_write_raw!(u32, "CNTP_TVAL_EL0");
}

/// Read/write handle through which `CNTP_TVAL_EL0` is accessed.
pub static CNTP_TVAL_EL0: Reg = Reg {};

@ -1,29 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Count register - EL0
//!
//! Holds the 64-bit physical count value.
use register::cpu::RegisterReadOnly;
/// Zero-sized accessor for the `CNTPCT_EL0` system register.
pub struct Reg;

impl RegisterReadOnly<u64, ()> for Reg {
    sys_coproc_read_raw!(u64, "CNTPCT_EL0");
}

/// Read-only handle through which `CNTPCT_EL0` is accessed.
pub static CNTPCT_EL0: Reg = Reg {};

@ -1,32 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Virtual Offset register - EL2
//!
//! Holds the 64-bit virtual offset. This is the offset between the physical
//! count value visible in CNTPCT_EL0 and the virtual count value visible in
//! CNTVCT_EL0.
use register::cpu::RegisterReadWrite;
/// Zero-sized accessor for the `CNTVOFF_EL2` system register.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    sys_coproc_read_raw!(u64, "CNTVOFF_EL2");
    sys_coproc_write_raw!(u64, "CNTVOFF_EL2");
}

/// Read/write handle through which `CNTVOFF_EL2` is accessed.
pub static CNTVOFF_EL2: Reg = Reg {};

@ -1,52 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Current Exception Level
//!
//! Holds the current Exception level.
use register::cpu::RegisterReadOnly;
register_bitfields! {u32,
    CurrentEL [
        /// Current Exception level. Possible values of this field are:
        ///
        /// 00 EL0
        /// 01 EL1
        /// 10 EL2
        /// 11 EL3
        ///
        /// When the HCR_EL2.NV bit is 1, Non-secure EL1 read accesses to the
        /// CurrentEL register return the value of 0x2 in this field.
        ///
        /// This field resets to a value that is architecturally UNKNOWN.
        EL OFFSET(2) NUMBITS(2) [
            EL0 = 0,
            EL1 = 1,
            EL2 = 2,
            EL3 = 3
        ]
    ]
}

/// Zero-sized accessor for the `CurrentEL` special-purpose register.
pub struct Reg;

impl RegisterReadOnly<u32, CurrentEL::Register> for Reg {
    sys_coproc_read_raw!(u32, "CurrentEL");
}

/// Read-only handle through which `CurrentEL` is accessed.
#[allow(non_upper_case_globals)]
pub static CurrentEL: Reg = Reg {};

@ -1,90 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Interrupt Mask Bits
//!
//! Allows access to the interrupt mask bits.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
    DAIF [
        /// Process state D mask. The possible values of this bit are:
        ///
        /// 0 Watchpoint, Breakpoint, and Software Step exceptions targeted at
        ///   the current Exception level are not masked.
        ///
        /// 1 Watchpoint, Breakpoint, and Software Step exceptions targeted at
        ///   the current Exception level are masked.
        ///
        /// When the target Exception level of the debug exception is higher
        /// than the current Exception level, the exception is not masked by
        /// this bit.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 1.
        D OFFSET(9) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],

        /// SError interrupt mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 1.
        A OFFSET(8) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],

        /// IRQ mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 1.
        I OFFSET(7) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],

        /// FIQ mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 1.
        F OFFSET(6) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ]
    ]
}

/// Zero-sized accessor for the `DAIF` special-purpose register.
pub struct Reg;

impl RegisterReadWrite<u32, DAIF::Register> for Reg {
    sys_coproc_read_raw!(u32, "DAIF");
    sys_coproc_write_raw!(u32, "DAIF");
}

/// Read/write handle through which `DAIF` is accessed.
pub static DAIF: Reg = Reg {};

@ -1,30 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Exception Link Register - EL2
//!
//! When taking an exception to EL2, holds the address to return to.
use register::cpu::RegisterReadWrite;
/// Zero-sized accessor for the `ELR_EL2` system register.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    sys_coproc_read_raw!(u64, "ELR_EL2");
    sys_coproc_write_raw!(u64, "ELR_EL2");
}

/// Read/write handle through which `ELR_EL2` is accessed.
pub static ELR_EL2: Reg = Reg {};

@ -1,31 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Fault Address Register - EL1
//!
//! Holds the faulting Virtual Address for all synchronous Instruction or Data
//! Abort, PC alignment fault and Watchpoint exceptions that are taken to EL1.
use register::cpu::RegisterReadWrite;
/// Zero-sized accessor for the `FAR_EL1` system register.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    sys_coproc_read_raw!(u64, "FAR_EL1");
    sys_coproc_write_raw!(u64, "FAR_EL1");
}

/// Read/write handle through which `FAR_EL1` is accessed.
pub static FAR_EL1: Reg = Reg {};

@ -1,123 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Hypervisor Configuration Register - EL2
//!
//! Provides configuration controls for virtualization, including defining
//! whether various Non-secure operations are trapped to EL2.
use register::cpu::RegisterReadWrite;

// Bitfield layout for HCR_EL2. Only the fields this crate uses (RW, DC,
// SWIO) are modelled; all other bits are left undescribed here.
register_bitfields! {u64,
    HCR_EL2 [
        /// Execution state control for lower Exception levels:
        ///
        /// 0 Lower levels are all AArch32.
        /// 1 The Execution state for EL1 is AArch64. The Execution state for
        ///   EL0 is determined by the current value of PSTATE.nRW when
        ///   executing at EL0.
        ///
        /// If all lower Exception levels cannot use AArch32 then this bit is
        /// RAO/WI.
        ///
        /// In an implementation that includes EL3, when SCR_EL3.NS==0, the PE
        /// behaves as if this bit has the same value as the SCR_EL3.RW bit for
        /// all purposes other than a direct read or write access of HCR_EL2.
        ///
        /// The RW bit is permitted to be cached in a TLB.
        ///
        /// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
        /// is {1, 1}, this field behaves as 1 for all purposes other than a
        /// direct read of the value of this bit.
        RW OFFSET(31) NUMBITS(1) [
            AllLowerELsAreAarch32 = 0,
            EL1IsAarch64 = 1
        ],
        /// Default Cacheability.
        ///
        /// 0 This control has no effect on the Non-secure EL1&0 translation
        ///   regime.
        ///
        /// 1 In Non-secure state:
        ///   - When EL1 is using AArch64, the PE behaves as if the value of
        ///     the SCTLR_EL1.M field is 0 for all purposes other than
        ///     returning the value of a direct read of SCTLR_EL1.
        ///
        ///   - When EL1 is using AArch32, the PE behaves as if the value of
        ///     the SCTLR.M field is 0 for all purposes other than returning
        ///     the value of a direct read of SCTLR.
        ///
        ///   - The PE behaves as if the value of the HCR_EL2.VM field is 1
        ///     for all purposes other than returning the value of a direct
        ///     read of HCR_EL2.
        ///
        ///   - The memory type produced by stage 1 of the EL1&0 translation
        ///     regime is Normal Non-Shareable, Inner Write-Back Read-Allocate
        ///     Write-Allocate, Outer Write-Back Read-Allocate Write-Allocate.
        ///
        /// This field has no effect on the EL2, EL2&0, and EL3 translation
        /// regimes.
        ///
        /// This field is permitted to be cached in a TLB.
        ///
        /// In an implementation that includes EL3, when the value of SCR_EL3.NS
        /// is 0 the PE behaves as if this field is 0 for all purposes other
        /// than a direct read or write access of HCR_EL2.
        ///
        /// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
        /// is {1, 1}, this field behaves as 0 for all purposes other than a
        /// direct read of the value of this field.
        DC OFFSET(12) NUMBITS(1) [],
        /// Set/Way Invalidation Override. Causes Non-secure EL1 execution of
        /// the data cache invalidate by set/way instructions to perform a data
        /// cache clean and invalidate by set/way:
        ///
        /// 0 This control has no effect on the operation of data cache
        ///   invalidate by set/way instructions.
        ///
        /// 1 Data cache invalidate by set/way instructions perform a data cache
        ///   clean and invalidate by set/way.
        ///
        /// When the value of this bit is 1:
        ///
        /// AArch32: DCISW performs the same invalidation as a DCCISW
        /// instruction.
        ///
        /// AArch64: DC ISW performs the same invalidation as a DC CISW
        /// instruction.
        ///
        /// This bit can be implemented as RES 1.
        ///
        /// In an implementation that includes EL3, when the value of SCR_EL3.NS
        /// is 0 the PE behaves as if this field is 0 for all purposes other
        /// than a direct read or write access of HCR_EL2.
        ///
        /// When HCR_EL2.TGE is 1, the PE ignores the value of this field for
        /// all purposes other than a direct read of this field.
        SWIO OFFSET(1) NUMBITS(1) []
    ]
}

/// Zero-sized accessor type for HCR_EL2.
pub struct Reg;

impl RegisterReadWrite<u64, HCR_EL2::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u64, "HCR_EL2");
    sys_coproc_write_raw!(u64, "HCR_EL2");
}

/// Public handle: e.g. `HCR_EL2.modify(HCR_EL2::RW::EL1IsAarch64)`.
pub static HCR_EL2: Reg = Reg {};

@ -1,82 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! AArch64 Memory Model Feature Register 0 - EL1
//!
//! Provides information about the implemented memory model and memory
//! management support in AArch64 state.
use register::cpu::RegisterReadOnly;

// Bitfield layout for ID_AA64MMFR0_EL1. Only the granule-support and
// PA-range fields are modelled here.
register_bitfields! {u64,
    ID_AA64MMFR0_EL1 [
        /// Support for 4KiB memory translation granule size. Defined values
        /// are:
        ///
        /// 0000 4KiB granule supported.
        /// 1111 4KiB granule not supported.
        ///
        /// All other values are reserved.
        TGran4 OFFSET(28) NUMBITS(4) [
            Supported = 0b0000,
            NotSupported = 0b1111
        ],
        /// Support for 64KiB memory translation granule size. Defined values
        /// are:
        ///
        /// 0000 64KiB granule supported.
        /// 1111 64KiB granule not supported.
        ///
        /// All other values are reserved.
        TGran64 OFFSET(24) NUMBITS(4) [
            Supported = 0b0000,
            NotSupported = 0b1111
        ],
        /// Physical Address range supported. Defined values are:
        ///
        /// 0000 32 bits, 4GiB.
        /// 0001 36 bits, 64GiB.
        /// 0010 40 bits, 1TiB.
        /// 0011 42 bits, 4TiB.
        /// 0100 44 bits, 16TiB.
        /// 0101 48 bits, 256TiB.
        /// 0110 52 bits, 4PiB.
        ///
        /// All other values are reserved.
        ///
        /// The value 0110 is permitted only if the implementation includes
        /// ARMv8.2-LPA, otherwise it is reserved.
        PARange OFFSET(0) NUMBITS(4) [
            Bits_32 = 0b0000,
            Bits_36 = 0b0001,
            Bits_40 = 0b0010,
            Bits_42 = 0b0011,
            Bits_44 = 0b0100,
            Bits_48 = 0b0101,
            Bits_52 = 0b0110
        ]
    ]
}

/// Zero-sized accessor type for ID_AA64MMFR0_EL1.
pub struct Reg;

impl RegisterReadOnly<u64, ID_AA64MMFR0_EL1::Register> for Reg {
    // ID registers are read-only, so only a raw MRS reader is generated.
    sys_coproc_read_raw!(u64, "ID_AA64MMFR0_EL1");
}

/// Public handle: e.g. `ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange)`.
pub static ID_AA64MMFR0_EL1: Reg = Reg {};

@ -1,85 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
// Shared implementation for the raw register readers. Expands to a `get`
// method suitable for the `register` crate's RegisterReadOnly/ReadWrite
// traits.
//
// NOTE(review): this uses the legacy colon-delimited `asm!` operand syntax
// (pre-stabilization inline assembly) — confirm against the toolchain this
// crate targets before touching the asm strings.
macro_rules! __read_raw {
    ($width:ty, $asm_instr:tt, $asm_reg_name:tt) => {
        /// Reads the raw bits of the CPU register.
        #[inline]
        fn get(&self) -> $width {
            // `match ()` is a trick to select exactly one cfg-gated arm at
            // compile time; on non-aarch64 hosts the method panics.
            match () {
                #[cfg(target_arch = "aarch64")]
                () => {
                    let reg;
                    unsafe {
                        asm!(concat!($asm_instr, " $0, ", $asm_reg_name) : "=r"(reg) ::: "volatile");
                    }
                    reg
                }
                #[cfg(not(target_arch = "aarch64"))]
                () => unimplemented!(),
            }
        }
    };
}

// Shared implementation for the raw register writers; expands to a `set`
// method mirroring `__read_raw!` above.
macro_rules! __write_raw {
    ($width:ty, $asm_instr:tt, $asm_reg_name:tt) => {
        /// Writes raw bits to the CPU register.
        #[cfg_attr(not(target_arch = "aarch64"), allow(unused_variables))]
        #[inline]
        fn set(&self, value: $width) {
            match () {
                #[cfg(target_arch = "aarch64")]
                () => {
                    unsafe {
                        asm!(concat!($asm_instr, " ", $asm_reg_name, ", $0") :: "r"(value) :: "volatile")
                    }
                }
                #[cfg(not(target_arch = "aarch64"))]
                () => unimplemented!(),
            }
        }
    };
}

/// Raw read from system coprocessor registers (expands to an MRS).
macro_rules! sys_coproc_read_raw {
    ($width:ty, $asm_reg_name:tt) => {
        __read_raw!($width, "mrs", $asm_reg_name);
    };
}

/// Raw write to system coprocessor registers (expands to an MSR).
macro_rules! sys_coproc_write_raw {
    ($width:ty, $asm_reg_name:tt) => {
        __write_raw!($width, "msr", $asm_reg_name);
    };
}

/// Raw read from (ordinary) registers such as `sp` (expands to a MOV).
macro_rules! read_raw {
    ($width:ty, $asm_reg_name:tt) => {
        __read_raw!($width, "mov", $asm_reg_name);
    };
}

/// Raw write to (ordinary) registers such as `sp` (expands to a MOV).
macro_rules! write_raw {
    ($width:ty, $asm_reg_name:tt) => {
        __write_raw!($width, "mov", $asm_reg_name);
    };
}

@ -1,82 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Memory Attribute Indirection Register - EL1
//!
//! Provides the memory attribute encodings corresponding to the possible
//! AttrIndx values in a Long-descriptor format translation table entry for
//! stage 1 translations at EL1.
use register::cpu::RegisterReadWrite;

// Layout of MAIR_EL1 itself: eight independent 8-bit attribute slots,
// selected by the AttrIndx field of a translation table entry.
register_bitfields! {u64,
    MAIR_EL1 [
        /// Attribute 7
        Attr7 OFFSET(56) NUMBITS(8) [],
        /// Attribute 6
        Attr6 OFFSET(48) NUMBITS(8) [],
        /// Attribute 5
        Attr5 OFFSET(40) NUMBITS(8) [],
        /// Attribute 4
        Attr4 OFFSET(32) NUMBITS(8) [],
        /// Attribute 3
        Attr3 OFFSET(24) NUMBITS(8) [],
        /// Attribute 2
        Attr2 OFFSET(16) NUMBITS(8) [],
        /// Attribute 1
        Attr1 OFFSET(8) NUMBITS(8) [],
        /// Attribute 0
        Attr0 OFFSET(0) NUMBITS(8) []
    ]
}

// Helper encoding for a single 8-bit attribute value: the high nibble
// selects device vs. normal memory (and outer cacheability), the low nibble
// refines it. Compose a MAIR_ATTR value, then place it into one of the
// MAIR_EL1 Attr* slots above.
register_bitfields! {u64,
    MAIR_ATTR [
        Attr_HIGH OFFSET(4) NUMBITS(4) [
            Device = 0b0000,
            Memory_OuterNonCacheable = 0b0100,
            Memory_OuterWriteThrough_NonTransient_ReadAlloc_WriteAlloc = 0b1011,
            Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
        ],
        Attr_LOW_DEVICE OFFSET(0) NUMBITS(4) [
            Device_nGnRnE = 0b0000,
            Device_nGnRE = 0b0100,
            Device_nGRE = 0b1000,
            Device_GRE = 0b1100
        ],
        Attr_LOW_MEMORY OFFSET(0) NUMBITS(4) [
            InnerNonCacheable = 0b0100,
            InnerWriteThrough_NonTransient_ReadAlloc_WriteAlloc = 0b1011,
            InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
        ]
    ]
}

/// Zero-sized accessor type for MAIR_EL1.
pub struct Reg;

impl RegisterReadWrite<u64, MAIR_EL1::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u64, "MAIR_EL1");
    sys_coproc_write_raw!(u64, "MAIR_EL1");
}

/// Public handle: e.g. `MAIR_EL1.write(MAIR_EL1::Attr0.val(attr))`.
pub static MAIR_EL1: Reg = Reg {};

@ -1,55 +0,0 @@
//! Processor core registers
//!
//! One submodule per architectural register; each defines a zero-sized
//! `Reg` accessor plus a `pub static` handle re-exported below.
// `macros` must come first and be `#[macro_use]` so the raw-access macros
// (sys_coproc_read_raw! etc.) are in scope for the sibling modules.
#[macro_use]
mod macros;
mod cntfrq_el0;
mod cnthctl_el2;
mod cntp_ctl_el0;
mod cntp_tval_el0;
mod cntpct_el0;
mod cntvoff_el2;
mod currentel;
mod daif;
mod elr_el2;
mod far_el1;
mod hcr_el2;
mod id_aa64mmfr0_el1;
mod mair_el1;
mod mpidr_el1;
mod sctlr_el1;
mod sp;
mod sp_el0;
mod sp_el1;
mod spsel;
mod spsr_el2;
mod tcr_el1;
mod ttbr0_el1;
mod ttbr1_el1;
// Export only the R/W traits and the static reg definitions
pub use register::cpu::*;
pub use self::cntfrq_el0::CNTFRQ_EL0;
pub use self::cnthctl_el2::CNTHCTL_EL2;
pub use self::cntp_ctl_el0::CNTP_CTL_EL0;
pub use self::cntp_tval_el0::CNTP_TVAL_EL0;
pub use self::cntpct_el0::CNTPCT_EL0;
pub use self::cntvoff_el2::CNTVOFF_EL2;
pub use self::currentel::CurrentEL;
pub use self::daif::DAIF;
pub use self::elr_el2::ELR_EL2;
pub use self::far_el1::FAR_EL1;
pub use self::hcr_el2::HCR_EL2;
pub use self::id_aa64mmfr0_el1::ID_AA64MMFR0_EL1;
// mair_el1 additionally exposes the MAIR_ATTR helper encoding.
pub use self::mair_el1::{MAIR_EL1, MAIR_ATTR};
pub use self::mpidr_el1::MPIDR_EL1;
pub use self::sctlr_el1::SCTLR_EL1;
pub use self::sp::SP;
pub use self::sp_el0::SP_EL0;
pub use self::sp_el1::SP_EL1;
pub use self::spsel::SPSel;
pub use self::spsr_el2::SPSR_EL2;
pub use self::tcr_el1::TCR_EL1;
pub use self::ttbr0_el1::TTBR0_EL1;
pub use self::ttbr1_el1::TTBR1_EL1;

@ -1,30 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Multiprocessor Affinity Register - EL1
//!
//! In a multiprocessor system, provides an additional PE
//! identification mechanism for scheduling purposes.
use register::cpu::RegisterReadOnly;

/// Zero-sized accessor type for MPIDR_EL1.
pub struct Reg;

impl RegisterReadOnly<u64, ()> for Reg {
    // Read-only, accessed as a plain u64 (no bitfield layout defined here).
    sys_coproc_read_raw!(u64, "MPIDR_EL1");
}

/// Public handle, used as `MPIDR_EL1.get()`.
pub static MPIDR_EL1: Reg = Reg {};

@ -1,103 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! System Control Register - EL1
//!
//! Provides top level control of the system, including its memory system, at
//! EL1 and EL0.
use register::cpu::RegisterReadWrite;

// Bitfield layout for SCTLR_EL1, accessed here as a 32-bit value. Only the
// I-cache, D-cache and MMU-enable controls are modelled.
register_bitfields! {u32,
    SCTLR_EL1 [
        /// Instruction access Cacheability control, for accesses at EL0 and
        /// EL1:
        ///
        /// 0 All instruction access to Normal memory from EL0 and EL1 are
        ///   Non-cacheable for all levels of instruction and unified cache.
        ///
        /// If the value of SCTLR_EL1.M is 0, instruction accesses from stage
        /// 1 of the EL1&0 translation regime are to Normal, Outer Shareable,
        /// Inner Non-cacheable, Outer Non-cacheable memory.
        ///
        /// 1 This control has no effect on the Cacheability of instruction
        ///   access to Normal memory from EL0 and EL1.
        ///
        /// If the value of SCTLR_EL1.M is 0, instruction accesses from stage
        /// 1 of the EL1&0 translation regime are to Normal, Outer Shareable,
        /// Inner Write-Through, Outer Write-Through memory.
        ///
        /// When the value of the HCR_EL2.DC bit is 1, then instruction access
        /// to Normal memory from EL0 and EL1 are Cacheable regardless of the
        /// value of the SCTLR_EL1.I bit.
        ///
        /// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
        /// is {1, 1}, this bit has no effect on the PE.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 0.
        I OFFSET(12) NUMBITS(1) [
            NonCacheable = 0,
            Cacheable = 1
        ],
        /// Cacheability control, for data accesses.
        ///
        /// 0 All data access to Normal memory from EL0 and EL1, and all Normal
        ///   memory accesses to the EL1&0 stage 1 translation tables, are
        ///   Non-cacheable for all levels of data and unified cache.
        ///
        /// 1 This control has no effect on the Cacheability of:
        ///   - Data access to Normal memory from EL0 and EL1.
        ///   - Normal memory accesses to the EL1&0 stage 1 translation
        ///     tables.
        ///
        /// When the value of the HCR_EL2.DC bit is 1, the PE ignores
        /// SCLTR.C. This means that Non-secure EL0 and Non-secure EL1 data
        /// accesses to Normal memory are Cacheable.
        ///
        /// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
        /// is {1, 1}, this bit has no effect on the PE.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 0.
        C OFFSET(2) NUMBITS(1) [
            NonCacheable = 0,
            Cacheable = 1
        ],
        /// MMU enable for EL1 and EL0 stage 1 address translation. Possible
        /// values of this bit are:
        ///
        /// 0 EL1 and EL0 stage 1 address translation disabled.
        ///   See the SCTLR_EL1.I field for the behavior of instruction accesses
        ///   to Normal memory.
        /// 1 EL1 and EL0 stage 1 address translation enabled.
        M OFFSET(0) NUMBITS(1) [
            Disable = 0,
            Enable = 1
        ]
    ]
}

/// Zero-sized accessor type for SCTLR_EL1.
pub struct Reg;

impl RegisterReadWrite<u32, SCTLR_EL1::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u32, "SCTLR_EL1");
    sys_coproc_write_raw!(u32, "SCTLR_EL1");
}

/// Public handle: e.g. `SCTLR_EL1.modify(SCTLR_EL1::M::Enable)`.
pub static SCTLR_EL1: Reg = Reg {};

@ -1,28 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer
use register::cpu::RegisterReadWrite;

/// Zero-sized accessor type for the current stack pointer.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    // `sp` is a general register, not a system register, so the plain
    // MOV-based macros are used instead of the sys_coproc_* (MRS/MSR) ones.
    read_raw!(u64, "sp");
    write_raw!(u64, "sp");
}

/// Public handle, used as `SP.get()` / `SP.set(addr)`.
pub static SP: Reg = Reg {};

@ -1,31 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer - EL0
//!
//! Holds the stack pointer associated with EL0. At higher Exception levels,
//! this is used as the current stack pointer when the value of SPSel.SP is 0.
use register::cpu::RegisterReadWrite;

/// Zero-sized accessor type for SP_EL0.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    // Accessed as a plain u64 stack-pointer value via MRS/MSR.
    sys_coproc_read_raw!(u64, "SP_EL0");
    sys_coproc_write_raw!(u64, "SP_EL0");
}

/// Public handle, used as `SP_EL0.get()` / `SP_EL0.set(addr)`.
pub static SP_EL0: Reg = Reg {};

@ -1,36 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer - EL1
//!
//! Holds the stack pointer associated with EL1. When executing at EL1, the
//! value of SPSel.SP determines the current stack pointer:
//!
//! SPSel.SP | current stack pointer
//! --------------------------------
//! 0 | SP_EL0
//! 1 | SP_EL1
use register::cpu::RegisterReadWrite;

/// Zero-sized accessor type for SP_EL1.
pub struct Reg;

impl RegisterReadWrite<u64, ()> for Reg {
    // Accessed as a plain u64 stack-pointer value via MRS/MSR. Note this is
    // only directly accessible from EL2 or higher.
    sys_coproc_read_raw!(u64, "SP_EL1");
    sys_coproc_write_raw!(u64, "SP_EL1");
}

/// Public handle, used as `SP_EL1.get()` / `SP_EL1.set(addr)`.
pub static SP_EL1: Reg = Reg {};

@ -1,48 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Stack Pointer Select
//!
//! Allows the Stack Pointer to be selected between SP_EL0 and SP_ELx.
use register::cpu::RegisterReadWrite;

register_bitfields! {u32,
    SPSel [
        /// Stack pointer to use. Possible values of this bit are:
        ///
        /// 0 Use SP_EL0 at all Exception levels.
        /// 1 Use SP_ELx for Exception level ELx.
        ///
        /// When this register has an architecturally-defined reset value, this
        /// field resets to 1.
        SP OFFSET(0) NUMBITS(1) [
            EL0 = 0,
            ELx = 1
        ]
    ]
}

/// Zero-sized accessor type for SPSel.
pub struct Reg;

impl RegisterReadWrite<u32, SPSel::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u32, "SPSEL");
    sys_coproc_write_raw!(u32, "SPSEL");
}

// The static deliberately mirrors the architectural name "SPSel" (mixed
// case), so the usual SCREAMING_SNAKE_CASE lint is silenced.
#[allow(non_upper_case_globals)]
pub static SPSel: Reg = Reg {};

@ -1,106 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Saved Program Status Register - EL2
//!
//! Holds the saved process state when an exception is taken to EL2.
use register::cpu::RegisterReadWrite;

// Bitfield layout for SPSR_EL2: the DAIF-style masks saved on exception
// entry plus the M field recording the originating EL/SP selection.
register_bitfields! {u32,
    SPSR_EL2 [
        /// Process state D mask. The possible values of this bit are:
        ///
        /// 0 Watchpoint, Breakpoint, and Software Step exceptions targeted at
        ///   the current Exception level are not masked.
        ///
        /// 1 Watchpoint, Breakpoint, and Software Step exceptions targeted at
        ///   the current Exception level are masked.
        ///
        /// When the target Exception level of the debug exception is higher
        /// than the current Exception level, the exception is not masked by
        /// this bit.
        D OFFSET(9) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],
        /// SError interrupt mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        A OFFSET(8) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],
        /// IRQ mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        I OFFSET(7) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],
        /// FIQ mask bit. The possible values of this bit are:
        ///
        /// 0 Exception not masked.
        /// 1 Exception masked.
        F OFFSET(6) NUMBITS(1) [
            Unmasked = 0,
            Masked = 1
        ],
        /// AArch64 state (Exception level and selected SP) that an exception
        /// was taken from. The possible values are:
        ///
        /// M[3:0] | State
        /// --------------
        /// 0b0000 | EL0t
        /// 0b0100 | EL1t
        /// 0b0101 | EL1h
        /// 0b1000 | EL2t
        /// 0b1001 | EL2h
        ///
        /// Other values are reserved, and returning to an Exception level that
        /// is using AArch64 with a reserved value in this field is treated as
        /// an illegal exception return.
        ///
        /// The bits in this field are interpreted as follows:
        /// - M[3:2] holds the Exception Level.
        /// - M[1] is unused and is RES 0 for all non-reserved values.
        /// - M[0] is used to select the SP:
        ///   - 0 means the SP is always SP0.
        ///   - 1 means the exception SP is determined by the EL.
        M OFFSET(0) NUMBITS(4) [
            EL0t = 0b0000,
            EL1t = 0b0100,
            EL1h = 0b0101,
            EL2t = 0b1000,
            EL2h = 0b1001
        ]
    ]
}

/// Zero-sized accessor type for SPSR_EL2.
pub struct Reg;

impl RegisterReadWrite<u32, SPSR_EL2::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u32, "SPSR_EL2");
    sys_coproc_write_raw!(u32, "SPSR_EL2");
}

/// Public handle: e.g. `SPSR_EL2.write(SPSR_EL2::M::EL1h + SPSR_EL2::I::Masked)`.
pub static SPSR_EL2: Reg = Reg {};

@ -1,313 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Control Register - EL1
//!
//! The control register for stage 1 of the EL1&0 translation regime.
use register::cpu::RegisterReadWrite;

// Bitfield layout for TCR_EL1. The fields come in TTBR1 (upper VA range) /
// TTBR0 (lower VA range) pairs: TBIn, TGn, SHn, ORGNn, IRGNn, EPDn, TnSZ.
register_bitfields! {u64,
    TCR_EL1 [
        /// Top Byte ignored - indicates whether the top byte of an address is
        /// used for address match for the TTBR1_EL1 region, or ignored and used
        /// for tagged addresses. Defined values are:
        ///
        /// 0 Top Byte used in the address calculation.
        /// 1 Top Byte ignored in the address calculation.
        TBI1 OFFSET(38) NUMBITS(1) [
            Used = 0,
            Ignored = 1
        ],
        /// Top Byte ignored - indicates whether the top byte of an address is
        /// used for address match for the TTBR0_EL1 region, or ignored and used
        /// for tagged addresses. Defined values are:
        ///
        /// 0 Top Byte used in the address calculation.
        /// 1 Top Byte ignored in the address calculation.
        TBI0 OFFSET(37) NUMBITS(1) [
            Used = 0,
            Ignored = 1
        ],
        /// ASID Size. Defined values are:
        ///
        /// 0 8 bit - the upper 8 bits of TTBR0_EL1 and TTBR1_EL1 are ignored by
        ///   hardware for every purpose except reading back the register, and are
        ///   treated as if they are all zeros when used for allocation and matching entries in the TLB.
        /// 1 16 bit - the upper 16 bits of TTBR0_EL1 and TTBR1_EL1 are used for
        ///   allocation and matching in the TLB.
        ///
        /// If the implementation has only 8 bits of ASID, this field is RES0.
        AS OFFSET(36) NUMBITS(1) [
            Bits_8 = 0,
            Bits_16 = 1
        ],
        /// Intermediate Physical Address Size.
        ///
        /// 000 32 bits, 4GiB.
        /// 001 36 bits, 64GiB.
        /// 010 40 bits, 1TiB.
        /// 011 42 bits, 4TiB.
        /// 100 44 bits, 16TiB.
        /// 101 48 bits, 256TiB.
        /// 110 52 bits, 4PiB
        ///
        /// Other values are reserved.
        ///
        /// The reserved values behave in the same way as the 101 or 110
        /// encoding, but software must not rely on this property as the
        /// behavior of the reserved values might change in a future revision of
        /// the architecture.
        ///
        /// The value 110 is permitted only if ARMv8.2-LPA is implemented and
        /// the translation granule size is 64KiB.
        ///
        /// In an implementation that supports 52-bit PAs, if the value of this
        /// field is not 110, then bits[51:48] of every translation table base
        /// address for the stage of translation controlled by TCR_EL1 are 0000.
        IPS OFFSET(32) NUMBITS(3) [
            Bits_32 = 0b000,
            Bits_36 = 0b001,
            Bits_40 = 0b010,
            Bits_42 = 0b011,
            Bits_44 = 0b100,
            Bits_48 = 0b101,
            Bits_52 = 0b110
        ],
        /// Granule size for the TTBR1_EL1.
        ///
        /// 01 16KiB
        /// 10 4KiB
        /// 11 64KiB
        ///
        /// Other values are reserved.
        ///
        /// If the value is programmed to either a reserved value, or a size
        /// that has not been implemented, then the hardware will treat the
        /// field as if it has been programmed to an IMPLEMENTATION DEFINED
        /// choice of the sizes that has been implemented for all purposes other
        /// than the value read back from this register.
        ///
        /// It is IMPLEMENTATION DEFINED whether the value read back is the
        /// value programmed or the value that corresponds to the size chosen.
        TG1 OFFSET(30) NUMBITS(2) [
            KiB_4 = 0b10,
            KiB_16 = 0b01,
            KiB_64 = 0b11
        ],
        /// Shareability attribute for memory associated with translation table
        /// walks using TTBR1_EL1.
        ///
        /// 00 Non-shareable
        /// 10 Outer Shareable
        /// 11 Inner Shareable
        ///
        /// Other values are reserved.
        SH1 OFFSET(28) NUMBITS(2) [
            None = 0b00,
            Outer = 0b10,
            Inner = 0b11
        ],
        /// Outer cacheability attribute for memory associated with translation
        /// table walks using TTBR1_EL1.
        ///
        /// 00 Normal memory, Outer Non-cacheable
        ///
        /// 01 Normal memory, Outer Write-Back Read-Allocate Write-Allocate
        ///    Cacheable
        ///
        /// 10 Normal memory, Outer Write-Through Read-Allocate No
        ///    Write-Allocate Cacheable
        ///
        /// 11 Normal memory, Outer Write-Back Read-Allocate No Write-Allocate
        ///    Cacheable
        ORGN1 OFFSET(26) NUMBITS(2) [
            NonCacheable = 0b00,
            WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
            WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
            WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
        ],
        /// Inner cacheability attribute for memory associated with translation
        /// table walks using TTBR1_EL1.
        ///
        /// 00 Normal memory, Inner Non-cacheable
        ///
        /// 01 Normal memory, Inner Write-Back Read-Allocate Write-Allocate
        ///    Cacheable
        ///
        /// 10 Normal memory, Inner Write-Through Read-Allocate No
        ///    Write-Allocate Cacheable
        ///
        /// 11 Normal memory, Inner Write-Back Read-Allocate No Write-Allocate
        ///    Cacheable
        IRGN1 OFFSET(24) NUMBITS(2) [
            NonCacheable = 0b00,
            WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
            WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
            WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
        ],
        /// Translation table walk disable for translations using
        /// TTBR1_EL1. This bit controls whether a translation table walk is
        /// performed on a TLB miss, for an address that is translated using
        /// TTBR1_EL1. The encoding of this bit is:
        ///
        /// 0 Perform translation table walks using TTBR1_EL1.
        ///
        /// 1 A TLB miss on an address that is translated using TTBR1_EL1
        ///   generates a Translation fault. No translation table walk is
        ///   performed.
        EPD1 OFFSET(23) NUMBITS(1) [
            EnableTTBR1Walks = 0,
            DisableTTBR1Walks = 1
        ],
        /// Selects whether TTBR0_EL1 or TTBR1_EL1 defines the ASID. The encoding
        /// of this bit is:
        ///
        /// 0 TTBR0_EL1.ASID defines the ASID.
        ///
        /// 1 TTBR1_EL1.ASID defines the ASID.
        A1 OFFSET(22) NUMBITS(1) [
            UseTTBR0ASID = 0b0,
            UseTTBR1ASID = 0b1
        ],
        /// The size offset of the memory region addressed by TTBR1_EL1. The
        /// region size is 2^(64-T1SZ) bytes.
        ///
        /// The maximum and minimum possible values for T1SZ depend on the level
        /// of translation table and the memory translation granule size, as
        /// described in the AArch64 Virtual Memory System Architecture chapter.
        T1SZ OFFSET(16) NUMBITS(6) [],
        /// Granule size for the TTBR0_EL1.
        ///
        /// 00 4KiB
        /// 01 64KiB
        /// 10 16KiB
        ///
        /// Other values are reserved.
        ///
        /// If the value is programmed to either a reserved value, or a size
        /// that has not been implemented, then the hardware will treat the
        /// field as if it has been programmed to an IMPLEMENTATION DEFINED
        /// choice of the sizes that has been implemented for all purposes other
        /// than the value read back from this register.
        ///
        /// It is IMPLEMENTATION DEFINED whether the value read back is the
        /// value programmed or the value that corresponds to the size chosen.
        /// (Note the TG0 encoding deliberately differs from TG1 above.)
        TG0 OFFSET(14) NUMBITS(2) [
            KiB_4 = 0b00,
            KiB_16 = 0b10,
            KiB_64 = 0b01
        ],
        /// Shareability attribute for memory associated with translation table
        /// walks using TTBR0_EL1.
        ///
        /// 00 Non-shareable
        /// 10 Outer Shareable
        /// 11 Inner Shareable
        ///
        /// Other values are reserved.
        SH0 OFFSET(12) NUMBITS(2) [
            None = 0b00,
            Outer = 0b10,
            Inner = 0b11
        ],
        /// Outer cacheability attribute for memory associated with translation
        /// table walks using TTBR0_EL1.
        ///
        /// 00 Normal memory, Outer Non-cacheable
        ///
        /// 01 Normal memory, Outer Write-Back Read-Allocate Write-Allocate
        ///    Cacheable
        ///
        /// 10 Normal memory, Outer Write-Through Read-Allocate No
        ///    Write-Allocate Cacheable
        ///
        /// 11 Normal memory, Outer Write-Back Read-Allocate No Write-Allocate
        ///    Cacheable
        ORGN0 OFFSET(10) NUMBITS(2) [
            NonCacheable = 0b00,
            WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
            WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
            WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
        ],
        /// Inner cacheability attribute for memory associated with translation
        /// table walks using TTBR0_EL1.
        ///
        /// 00 Normal memory, Inner Non-cacheable
        ///
        /// 01 Normal memory, Inner Write-Back Read-Allocate Write-Allocate
        ///    Cacheable
        ///
        /// 10 Normal memory, Inner Write-Through Read-Allocate No
        ///    Write-Allocate Cacheable
        ///
        /// 11 Normal memory, Inner Write-Back Read-Allocate No Write-Allocate
        ///    Cacheable
        IRGN0 OFFSET(8) NUMBITS(2) [
            NonCacheable = 0b00,
            WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
            WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
            WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
        ],
        /// Translation table walk disable for translations using
        /// TTBR0_EL1. This bit controls whether a translation table walk is
        /// performed on a TLB miss, for an address that is translated using
        /// TTBR0_EL1. The encoding of this bit is:
        ///
        /// 0 Perform translation table walks using TTBR0_EL1.
        ///
        /// 1 A TLB miss on an address that is translated using TTBR0_EL1
        ///   generates a Translation fault. No translation table walk is
        ///   performed.
        EPD0 OFFSET(7) NUMBITS(1) [
            EnableTTBR0Walks = 0,
            DisableTTBR0Walks = 1
        ],
        /// The size offset of the memory region addressed by TTBR0_EL1. The
        /// region size is 2^(64-T0SZ) bytes.
        ///
        /// The maximum and minimum possible values for T0SZ depend on the level
        /// of translation table and the memory translation granule size, as
        /// described in the AArch64 Virtual Memory System Architecture chapter.
        T0SZ OFFSET(0) NUMBITS(6) []
    ]
}

/// Zero-sized accessor type for TCR_EL1.
pub struct Reg;

impl RegisterReadWrite<u64, TCR_EL1::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u64, "TCR_EL1");
    sys_coproc_write_raw!(u64, "TCR_EL1");
}

/// Public handle: compose fields with `+`, e.g.
/// `TCR_EL1.write(TCR_EL1::TG0::KiB_4 + TCR_EL1::T0SZ.val(16))`.
pub static TCR_EL1: Reg = Reg {};

@ -1,61 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Table Base Register 0 - EL1
//!
//! Holds the base address of the translation table for the initial lookup for
//! stage 1 of the translation of an address from the lower VA range in the
//! EL1&0 translation regime, and other information for this translation regime.
use register::cpu::RegisterReadWrite;

register_bitfields! {u64,
    TTBR0_EL1 [
        /// An ASID for the translation table base address. The TCR_EL1.A1 field
        /// selects either TTBR0_EL1.ASID or TTBR1_EL1.ASID.
        ///
        /// If the implementation has only 8 bits of ASID, then the upper 8 bits
        /// of this field are RES 0.
        ASID OFFSET(48) NUMBITS(16) [],
        /// Translation table base address, occupying register bits [47:1].
        BADDR OFFSET(1) NUMBITS(47) [],
        /// Common not Private
        CnP OFFSET(0) NUMBITS(1) []
    ]
}

/// Zero-sized accessor type for TTBR0_EL1.
pub struct Reg;

impl RegisterReadWrite<u64, TTBR0_EL1::Register> for Reg {
    // get/set are generated as raw MRS/MSR accesses (see cpu/macros.rs).
    sys_coproc_read_raw!(u64, "TTBR0_EL1");
    sys_coproc_write_raw!(u64, "TTBR0_EL1");
}

impl Reg {
    /// Returns the translation table base address as a byte address.
    ///
    /// BADDR is stored in bits [47:1], so the extracted field value is
    /// shifted left by 1 to recover the address.
    #[inline]
    pub fn get_baddr(&self) -> u64 {
        self.read(TTBR0_EL1::BADDR) << 1
    }
    /// Sets the translation table base address from a byte address.
    ///
    /// The address is shifted right by 1 to fit the BADDR field at bits
    /// [47:1]; bit 0 of `addr` is discarded, so `addr` should meet the
    /// table alignment required by the configured granule.
    /// Note this overwrites the whole register (ASID and CnP become 0).
    #[inline]
    pub fn set_baddr(&self, addr: u64) {
        self.write(TTBR0_EL1::BADDR.val(addr >> 1));
    }
}

/// Public handle: e.g. `TTBR0_EL1.set_baddr(table_paddr)`.
pub static TTBR0_EL1: Reg = Reg {};

@ -1,61 +0,0 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Table Base Register 1 - EL1
//!
//! Holds the base address of the translation table for the initial lookup for
//! stage 1 of the translation of an address from the upper VA range in the
//! EL1&0 translation regime, and other information for this translation regime.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
    TTBR1_EL1 [
        /// An ASID for the translation table base address. The TCR_EL1.A1 field
        /// selects either TTBR0_EL1.ASID or TTBR1_EL1.ASID.
        ///
        /// If the implementation has only 8 bits of ASID, then the upper 8 bits
        /// of this field are RES0 (reserved, treat as zero).
        ASID OFFSET(48) NUMBITS(16) [],
        /// Translation table base address. Only bits [47:1] of the address are
        /// stored here; see `Reg::get_baddr`/`Reg::set_baddr` for the 1-bit
        /// shift that converts between the field and the full byte address.
        BADDR OFFSET(1) NUMBITS(47) [],
        /// Common not Private bit.
        CnP OFFSET(0) NUMBITS(1) []
    ]
}
/// Zero-sized accessor type for the TTBR1_EL1 system register.
pub struct Reg;
impl RegisterReadWrite<u64, TTBR1_EL1::Register> for Reg {
    // Raw read/write accessors for "TTBR1_EL1", generated by the
    // `register` crate's system-register macros.
    sys_coproc_read_raw!(u64, "TTBR1_EL1");
    sys_coproc_write_raw!(u64, "TTBR1_EL1");
}
impl Reg {
    /// Returns the translation table base address held in TTBR1_EL1.
    ///
    /// The BADDR field stores bits [47:1] of the address, so the raw
    /// field value is shifted left once to rebuild the byte address.
    #[inline]
    pub fn get_baddr(&self) -> u64 {
        let baddr_field = self.read(TTBR1_EL1::BADDR);
        baddr_field << 1
    }

    /// Installs `addr` as the translation table base address.
    ///
    /// The right shift drops bit 0 of `addr`; the remaining bits
    /// [47:1] are written into the BADDR field.
    #[inline]
    pub fn set_baddr(&self, addr: u64) {
        let baddr_field = addr >> 1;
        self.write(TTBR1_EL1::BADDR.val(baddr_field));
    }
}
pub static TTBR1_EL1: Reg = Reg {};

6
kernel/Cargo.lock generated

@ -1,6 +1,7 @@
[[package]]
name = "aarch64"
version = "0.1.0"
version = "2.2.2"
source = "git+https://github.com/equation314/aarch64#47bf5439f5a1379f0fef6272853cf684207a4e45"
dependencies = [
"bare-metal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -256,7 +257,7 @@ dependencies = [
name = "ucore"
version = "0.1.0"
dependencies = [
"aarch64 0.1.0",
"aarch64 2.2.2 (git+https://github.com/equation314/aarch64)",
"atags 0.1.0",
"bbl 0.1.0",
"bcm2837 0.1.0",
@ -362,6 +363,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aarch64 2.2.2 (git+https://github.com/equation314/aarch64)" = "<none>"
"checksum bare-metal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1bdcf9294ed648c7cd29b11db06ea244005aeef50ae8f605b1a3af2940bf8f92"
"checksum bit-vec 0.5.0 (git+https://github.com/AltSysrq/bit-vec.git)" = "<none>"
"checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"

@ -44,7 +44,7 @@ riscv = { path = "../crate/riscv" }
bbl = { path = "../crate/bbl" }
[target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { path = "../crate/aarch64" }
aarch64 = { git = "https://github.com/equation314/aarch64" }
atags = { path = "../crate/atags" }
bcm2837 = { path = "../crate/bcm2837", features = ["use_generic_timer"] }

Loading…
Cancel
Save