koumingyang 6 years ago
parent 5610d0bdb0
commit ef213d60bb

@@ -0,0 +1,68 @@
[[package]]
name = "aarch64"
version = "0.1.0"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cortex-a 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bit_field"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bitflags"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cortex-a"
version = "2.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "os_bootinfo"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "register"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"tock-registers 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "tock-registers"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "usize_conversions"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ux"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum cortex-a 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2b187d0d728b4a99ba1d79f9671b976bcdd71a8a2c719585218fd2dc14a4d08c"
"checksum os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "66481dbeb5e773e7bd85b63cd6042c30786f834338288c5ec4f3742673db360a"
"checksum register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e10f31b6d2299e5620986ad9fcdd66463e125ad72af4f403f9aedf7592d5ccdb"
"checksum tock-registers 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3a385d94f3f62e60445a0adb9ff8d9621faa272234530d4c0f848ec98f88e316"
"checksum usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f70329e2cbe45d6c97a5112daad40c34cd9a4e18edb5a2a18fefeb584d8d25e5"
"checksum ux 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "53d8df5dd8d07fedccd202de1887d94481fadaea3db70479f459e8163a1fab41"

@@ -0,0 +1,16 @@
[package]
name = "aarch64"
version = "0.1.0"
authors = ["koumingyang <1761674434@qq.com>"]
[dependencies]
register = "0.2.0"
bit_field = "0.9.0"
bitflags = "1.0.1"
usize_conversions = "0.2.0"
os_bootinfo = "0.2.0"
bare-metal = "0.2.0"
[dependencies.ux]
default-features = false
version = "0.1.0"

@@ -0,0 +1,418 @@
use core::convert::{Into, TryInto};
use core::fmt;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use bit_field::BitField;
use usize_conversions::FromUsize;
use ux::*;
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct VirtAddr(u64);
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(transparent)]
pub struct PhysAddr(u64);
#[derive(Debug)]
pub struct VirtAddrNotValid(u64);
impl VirtAddr {
/// Creates a new canonical virtual address.
///
/// On AArch64, an address is canonical when bits 48 to 64 are either all set or
/// all clear. Panics if the address is not canonical (i.e. bits 48 to 64 contain
/// a mix of zeros and ones).
pub fn new(addr: u64) -> VirtAddr {
Self::try_new(addr).expect(
"invalid virtual address",
)
}
/// Tries to create a new canonical virtual address.
///
/// On AArch64, a valid virtual address has bits 48 to 64 either all clear
/// (addresses starting with 0x0000) or all set (addresses starting with 0xffff).
pub fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
match addr.get_bits(48..64) {
0 | 0xffff => Ok(VirtAddr(addr)), // address is canonical
other => Err(VirtAddrNotValid(other)),
}
}
/// Creates a virtual address without checking whether it is canonical.
pub fn new_unchecked(addr: u64) -> VirtAddr {
VirtAddr(addr)
}
/// Creates a virtual address that points to `0`.
pub const fn zero() -> VirtAddr {
VirtAddr(0)
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Creates a virtual address from the given pointer.
pub fn from_ptr<T>(ptr: *const T) -> Self {
use usize_conversions::FromUsize;
Self::new(u64::from_usize(ptr as usize))
}
/// Converts the address to a raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_ptr<T>(self) -> *const T {
use usize_conversions::usize_from;
usize_from(self.as_u64()) as *const T
}
/// Converts the address to a mutable raw pointer.
#[cfg(target_pointer_width = "64")]
pub fn as_mut_ptr<T>(self) -> *mut T {
self.as_ptr::<T>() as *mut T
}
/// Aligns the virtual address upwards to the given alignment.
///
/// See the `align_up` function for more information.
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_up(self.0, align.into()))
}
/// Aligns the virtual address downwards to the given alignment.
///
/// See the `align_down` function for more information.
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
VirtAddr(align_down(self.0, align.into()))
}
/// Checks whether the virtual address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.align_down(align) == self
}
/// Returns the 12-bit page offset of this virtual address.
pub fn page_offset(&self) -> u12 {
u12::new((self.0 & 0xfff).try_into().unwrap())
}
/// Returns the 9-bit level 1 page table index.
pub fn p1_index(&self) -> u9 {
u9::new(((self.0 >> 12) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 2 page table index.
pub fn p2_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 3 page table index.
pub fn p3_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9) & 0o777).try_into().unwrap())
}
/// Returns the 9-bit level 4 page table index.
pub fn p4_index(&self) -> u9 {
u9::new(((self.0 >> 12 >> 9 >> 9 >> 9) & 0o777).try_into().unwrap())
}
}
impl fmt::Debug for VirtAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "VirtAddr({:#x})", self.0)
}
}
impl Add<u64> for VirtAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for VirtAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
impl Sub<u64> for VirtAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
VirtAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for VirtAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for VirtAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for VirtAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
impl Sub<VirtAddr> for VirtAddr {
type Output = u64;
fn sub(self, rhs: VirtAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all zero.
#[derive(Debug)]
pub struct PhysAddrNotValid(u64);
impl PhysAddr {
/// Creates a new physical address.
///
/// Panics if a bit in the range 52 to 64 is set.
pub fn new(addr: u64) -> PhysAddr {
assert_eq!(
addr.get_bits(52..64),
0,
"physical addresses must not have any bits in the range 52 to 64 set"
);
PhysAddr(addr)
}
/// Tries to create a new physical address.
///
/// Fails if any bits in the range 52 to 64 are set.
pub fn try_new(addr: u64) -> Result<PhysAddr, PhysAddrNotValid> {
match addr.get_bits(52..64) {
0 => Ok(PhysAddr(addr)), // address is valid
other => Err(PhysAddrNotValid(other)),
}
}
/// Converts the address to an `u64`.
pub fn as_u64(self) -> u64 {
self.0
}
/// Convenience method for checking if a physical address is null.
pub fn is_null(&self) -> bool {
self.0 == 0
}
/// Aligns the physical address upwards to the given alignment.
///
/// See the `align_up` function for more information.
pub fn align_up<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_up(self.0, align.into()))
}
/// Aligns the physical address downwards to the given alignment.
///
/// See the `align_down` function for more information.
pub fn align_down<U>(self, align: U) -> Self
where
U: Into<u64>,
{
PhysAddr(align_down(self.0, align.into()))
}
/// Checks whether the physical address has the demanded alignment.
pub fn is_aligned<U>(self, align: U) -> bool
where
U: Into<u64>,
{
self.align_down(align) == self
}
}
impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "PhysAddr({:#x})", self.0)
}
}
impl fmt::Binary for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::LowerHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::Octal for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl fmt::UpperHex for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
}
}
impl Add<u64> for PhysAddr {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0 + rhs)
}
}
impl AddAssign<u64> for PhysAddr {
fn add_assign(&mut self, rhs: u64) {
*self = *self + rhs;
}
}
impl Add<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn add(self, rhs: usize) -> Self::Output {
self + u64::from_usize(rhs)
}
}
impl AddAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn add_assign(&mut self, rhs: usize) {
self.add_assign(u64::from_usize(rhs))
}
}
impl Sub<u64> for PhysAddr {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysAddr::new(self.0.checked_sub(rhs).unwrap())
}
}
impl SubAssign<u64> for PhysAddr {
fn sub_assign(&mut self, rhs: u64) {
*self = *self - rhs;
}
}
impl Sub<usize> for PhysAddr
where
u64: FromUsize,
{
type Output = Self;
fn sub(self, rhs: usize) -> Self::Output {
self - u64::from_usize(rhs)
}
}
impl SubAssign<usize> for PhysAddr
where
u64: FromUsize,
{
fn sub_assign(&mut self, rhs: usize) {
self.sub_assign(u64::from_usize(rhs))
}
}
impl Sub<PhysAddr> for PhysAddr {
type Output = u64;
fn sub(self, rhs: PhysAddr) -> Self::Output {
self.as_u64().checked_sub(rhs.as_u64()).unwrap()
}
}
/// Align address downwards.
///
/// Returns the greatest x with alignment `align` so that x <= addr. The alignment must be
/// a power of 2.
pub fn align_down(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
addr & !(align - 1)
}
/// Align address upwards.
///
/// Returns the smallest x with alignment `align` so that x >= addr. The alignment must be
/// a power of 2.
pub fn align_up(addr: u64, align: u64) -> u64 {
assert!(align.is_power_of_two(), "`align` must be a power of two");
let align_mask = align - 1;
if addr & align_mask == 0 {
addr // already aligned
} else {
(addr | align_mask) + 1
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_align_up() {
// align 1
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(1234, 1), 1234);
assert_eq!(align_up(0xffffffffffffffff, 1), 0xffffffffffffffff);
// align 2
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(1233, 2), 1234);
assert_eq!(align_up(0xfffffffffffffffe, 2), 0xfffffffffffffffe);
// address 0
assert_eq!(align_up(0, 128), 0);
assert_eq!(align_up(0, 1), 0);
assert_eq!(align_up(0, 2), 0);
assert_eq!(align_up(0, 0x8000000000000000), 0);
}
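// A hedged companion test (not part of the original commit) sketching the
// canonical-address check and the per-level index accessors defined above.
#[test]
pub fn test_virt_addr_decomposition() {
// Bits 48..64 all zero or all one: canonical. A mixed pattern is rejected.
assert!(VirtAddr::try_new(0xffff_0000_0000_0000).is_ok());
assert!(VirtAddr::try_new(0x0001_0000_0000_0000).is_err());
// Each level index is 9 bits wide, stacked above the 12-bit page offset.
let addr = VirtAddr::new((3u64 << 39) | (2 << 30) | (1 << 21) | (7 << 12) | 0xabc);
assert_eq!(u16::from(addr.page_offset()), 0xabc);
assert_eq!(u16::from(addr.p1_index()), 7);
assert_eq!(u16::from(addr.p2_index()), 1);
assert_eq!(u16::from(addr.p3_index()), 2);
assert_eq!(u16::from(addr.p4_index()), 3);
}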
}

@@ -0,0 +1,162 @@
use paging::PhysFrame;
use addr::PhysAddr;
use regs::*;
/// Invalidates the entire EL1 TLB across the inner shareable domain.
#[inline(always)]
pub fn tlb_invalidate() {
unsafe {
asm!("dsb ishst
tlbi vmalle1is
dsb ish
tlbi vmalle1is
isb");
}
}
/// Returns the current stack pointer.
#[inline(always)]
pub fn sp() -> *const u8 {
let ptr: usize;
unsafe {
asm!("mov $0, sp" : "=r"(ptr));
}
ptr as *const u8
}
/// Returns the current program counter.
#[inline(always)]
pub unsafe fn get_pc() -> usize {
let pc: usize;
asm!("ADR $0, ." : "=r"(pc));
pc
}
/// Returns the current exception level.
///
/// # Safety
/// This function should only be called when EL is >= 1.
#[inline(always)]
pub unsafe fn current_el() -> u8 {
let el_reg: u64;
asm!("mrs $0, CurrentEL" : "=r"(el_reg));
((el_reg & 0b1100) >> 2) as u8
}
/// Returns the fault address register FAR_EL1.
#[inline(always)]
pub unsafe fn get_far() -> usize {
let far: usize;
asm!("mrs $0, far_el1" : "=r"(far));
far
}
/// Returns the translation table base register TTBR0_EL1.
#[inline(always)]
pub unsafe fn get_ttbr0() -> usize {
let ttbr0: usize;
asm!("mrs $0, ttbr0_el1" : "=r"(ttbr0));
ttbr0
}
/// Returns the translation table base register TTBR1_EL1.
#[inline(always)]
pub unsafe fn get_ttbr1() -> usize {
let ttbr1: usize;
asm!("mrs $0, ttbr1_el1" : "=r"(ttbr1));
ttbr1
}
/// Returns the SPSel value.
#[inline(always)]
pub fn sp_sel() -> u8 {
let ptr: u32;
unsafe {
asm!("mrs $0, SPSel" : "=r"(ptr));
}
(ptr & 1) as u8
}
/// Returns the core currently executing.
///
/// # Safety
///
/// This function should only be called when EL is >= 1.
pub unsafe fn affinity() -> usize {
let x: usize;
asm!("mrs $0, mpidr_el1
and $0, $0, #3"
: "=r"(x));
x
}
/// Wait For Interrupt
pub fn wfi() {
unsafe {
asm!("wfi" :::: "volatile");
}
}
/// The classic no-op
#[inline]
pub fn nop() {
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe { asm!("nop" :::: "volatile") },
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Wait For Event
#[inline]
pub fn wfe() {
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe { asm!("wfe" :::: "volatile") },
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
/// Exception return
///
/// Will jump to wherever the corresponding link register points to, and
/// therefore never return.
#[inline]
pub fn eret() -> ! {
use core;
match () {
#[cfg(target_arch = "aarch64")]
() => unsafe {
asm!("eret" :::: "volatile");
core::intrinsics::unreachable()
},
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
bitflags! {
/// Flags of the TTBR0_EL1 register (Translation Table Base Register 0, EL1).
pub struct ttbr0_el1_Flags: u64 {
const COMMON_NOT_PRIVATE = 1 << 0;
}
}
/// Reads TTBR0_EL1 and returns the root table frame together with its flags.
pub fn ttbr0_el1_read() -> (PhysFrame, ttbr0_el1_Flags) {
let value = TTBR0_EL1.get();
let flags = ttbr0_el1_Flags::from_bits_truncate(value);
let addr = PhysAddr::new(value & 0x_000f_ffff_ffff_f000);
let frame = PhysFrame::containing_address(addr);
(frame, flags)
}
/// Writes the given frame as the new translation table base into TTBR0_EL1.
pub fn ttbr0_el1_write(frame: PhysFrame) {
let addr = frame.start_address();
let value = addr.as_u64();
TTBR0_EL1.set_baddr(value);
}
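/// A hedged sketch (not part of the original commit): installing a new root
/// page table pairs the TTBR0_EL1 write with a full TLB invalidation so that
/// no stale translations from the previous table survive.
pub fn ttbr0_el1_switch(frame: PhysFrame) {
ttbr0_el1_write(frame);
tlb_invalidate();
}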

@@ -0,0 +1,87 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
// Borrow implementations from the pending upstream ACLE implementation until it
// is merged. Afterwards, we'll probably just reexport them, hoping that the API
// doesn't change.
//
// https://github.com/rust-lang-nursery/stdsimd/pull/557
mod sealed {
pub trait Dmb {
unsafe fn __dmb(&self);
}
pub trait Dsb {
unsafe fn __dsb(&self);
}
pub trait Isb {
unsafe fn __isb(&self);
}
}
macro_rules! dmb_dsb {
($A:ident) => {
impl sealed::Dmb for $A {
#[inline(always)]
unsafe fn __dmb(&self) {
asm!(concat!("DMB ", stringify!($A)) : : : "memory" : "volatile")
}
}
impl sealed::Dsb for $A {
#[inline(always)]
unsafe fn __dsb(&self) {
asm!(concat!("DSB ", stringify!($A)) : : : "memory" : "volatile")
}
}
};
}
pub struct SY;
dmb_dsb!(SY);
impl sealed::Isb for SY {
#[inline(always)]
unsafe fn __isb(&self) {
asm!("ISB SY" : : : "memory" : "volatile")
}
}
#[inline(always)]
pub unsafe fn dmb<A>(arg: A)
where
A: sealed::Dmb,
{
arg.__dmb()
}
#[inline(always)]
pub unsafe fn dsb<A>(arg: A)
where
A: sealed::Dsb,
{
arg.__dsb()
}
#[inline(always)]
pub unsafe fn isb<A>(arg: A)
where
A: sealed::Isb,
{
arg.__isb()
}
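/// A hedged usage example (not part of the original commit): a full-system
/// barrier pair as used after page table updates. `SY` is the only domain
/// token defined above; the ACLE defines further ones (ISH, NSH, ...).
pub unsafe fn full_system_barrier() {
dsb(SY); // make all prior memory accesses visible system-wide
isb(SY); // then resynchronize the instruction stream
}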

@@ -0,0 +1,29 @@
#![no_std]
//#![deny(warnings)]
#![feature(asm)]
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(try_from)]
extern crate bare_metal;
#[macro_use]
extern crate register;
#[macro_use]
extern crate bitflags;
extern crate bit_field;
extern crate os_bootinfo;
extern crate usize_conversions;
/// Provides the non-standard-width integer types `u2`–`u63`.
///
/// We use these integer types in various APIs, for example `u9` for page table indices.
pub extern crate ux;
pub use addr::{align_down, align_up, PhysAddr, VirtAddr};
pub mod asm;
pub mod addr;
pub mod paging;
pub mod barrier;
pub mod regs;

@@ -0,0 +1,15 @@
//! Traits for abstracting away frame allocation and deallocation.
use paging::{PageSize, PhysFrame};
/// A trait for types that can allocate a frame of memory.
pub trait FrameAllocator<S: PageSize> {
/// Allocate a frame of the appropriate size and return it if possible.
fn alloc(&mut self) -> Option<PhysFrame<S>>;
}
/// A trait for types that can deallocate a frame of memory.
pub trait FrameDeallocator<S: PageSize> {
/// Deallocate the given frame of memory.
fn dealloc(&mut self, frame: PhysFrame<S>);
}
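// A hedged sketch (not part of the original commit): the simplest conforming
// allocator is a bump allocator that hands out consecutive 4KiB frames from a
// fixed physical range and never reclaims them. The range bounds are
// illustrative placeholders supplied by the caller.
use paging::Size4KiB;
use addr::PhysAddr;
pub struct BumpFrameAllocator {
/// Next free physical address; assumed 4KiB-aligned.
next: u64,
/// Exclusive end of the usable physical range.
end: u64,
}
impl FrameAllocator<Size4KiB> for BumpFrameAllocator {
fn alloc(&mut self) -> Option<PhysFrame<Size4KiB>> {
if self.next + Size4KiB::SIZE <= self.end {
let frame = PhysFrame::containing_address(PhysAddr::new(self.next));
self.next += Size4KiB::SIZE;
Some(frame)
} else {
None
}
}
}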

@@ -0,0 +1,528 @@
//! Abstractions for page tables and other paging related structures.
//!
//! Page tables translate virtual memory “pages” to physical memory “frames”.
pub use self::frame_alloc::*;
pub use self::page_table::*;
#[cfg(target_arch = "aarch64")]
pub use self::recursive::*;
use core::fmt;
use core::marker::PhantomData;
use core::ops::{Add, AddAssign, Sub, SubAssign};
use os_bootinfo;
use ux::*;
use addr::{PhysAddr, VirtAddr};
mod frame_alloc;
mod page_table;
mod recursive;
/// Trait for abstracting over the three supported page sizes on AArch64: 4KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
/// The page size in bytes.
const SIZE: u64;
/// A string representation of the page size for debug output.
const SIZE_AS_DEBUG_STR: &'static str;
}
/// This trait is implemented for 4KiB and 2MiB pages, but not for 1GiB pages.
pub trait NotGiantPageSize: PageSize {}
/// A standard 4KiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
/// A “giant” 1GiB page.
///
/// (Mapped by a level 1 block descriptor when a 4KiB translation granule is used.)
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size4KiB {
const SIZE: u64 = 4096;
const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
}
impl NotGiantPageSize for Size4KiB {}
impl PageSize for Size2MiB {
const SIZE: u64 = Size4KiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
}
impl NotGiantPageSize for Size2MiB {}
impl PageSize for Size1GiB {
const SIZE: u64 = Size2MiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
}
/// A virtual memory page.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct Page<S: PageSize = Size4KiB> {
start_address: VirtAddr,
size: PhantomData<S>,
}
impl<S: PageSize> Page<S> {
/// Returns the page that starts at the given virtual address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid page start).
pub fn from_start_address(address: VirtAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(Page::containing_address(address))
}
/// Returns the page that contains the given virtual address.
pub fn containing_address(address: VirtAddr) -> Self {
Page {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the page.
pub fn start_address(&self) -> VirtAddr {
self.start_address
}
/// Returns the size of the page (4KiB, 2MiB or 1GiB).
pub const fn size(&self) -> u64 {
S::SIZE
}
/// Returns the level 4 page table index of this page.
pub fn p4_index(&self) -> u9 {
self.start_address().p4_index()
}
/// Returns the level 3 page table index of this page.
pub fn p3_index(&self) -> u9 {
self.start_address().p3_index()
}
/// Returns a range of pages, exclusive of `end`.
pub fn range(start: Self, end: Self) -> PageRange<S> {
PageRange { start, end }
}
/// Returns a range of pages, inclusive of `end`.
pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
}
impl<S: NotGiantPageSize> Page<S> {
/// Returns the level 2 page table index of this page.
pub fn p2_index(&self) -> u9 {
self.start_address().p2_index()
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
pub fn from_page_table_indices_1gib(p4_index: u9, p3_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
pub fn from_page_table_indices_2mib(p4_index: u9, p3_index: u9, p2_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
addr.set_bits(21..30, u64::from(p2_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
pub fn from_page_table_indices(p4_index: u9, p3_index: u9, p2_index: u9, p1_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
addr.set_bits(21..30, u64::from(p2_index));
addr.set_bits(12..21, u64::from(p1_index));
Page::containing_address(VirtAddr::new(addr))
}
/// Returns the level 1 page table index of this page.
pub fn p1_index(&self) -> u9 {
self.start_address().p1_index()
}
}
impl<S: PageSize> fmt::Debug for Page<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"Page[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for Page<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for Page<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for Page<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
Page::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for Page<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<Self> for Page<S> {
type Output = u64;
fn sub(self, rhs: Self) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of pages with exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, exclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRange<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
impl<S: PageSize> Iterator for PageRange<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
end: Page::containing_address(self.end.start_address()),
}
}
}
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of pages with inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PageRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: Page<S>,
/// The end of the range, inclusive.
pub end: Page<S>,
}
impl<S: PageSize> PageRangeInclusive<S> {
/// Returns whether this range contains no pages.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
impl<S: PageSize> Iterator for PageRangeInclusive<S> {
type Item = Page<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let page = self.start.clone();
self.start += 1;
Some(page)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PageRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A physical memory frame.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
pub struct PhysFrame<S: PageSize = Size4KiB> {
start_address: PhysAddr,
size: PhantomData<S>,
}
impl<S: PageSize> PhysFrame<S> {
/// Returns the frame that starts at the given physical address.
///
/// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
pub fn from_start_address(address: PhysAddr) -> Result<Self, ()> {
if !address.is_aligned(S::SIZE) {
return Err(());
}
Ok(PhysFrame::containing_address(address))
}
/// Returns the frame that contains the given physical address.
pub fn containing_address(address: PhysAddr) -> Self {
PhysFrame {
start_address: address.align_down(S::SIZE),
size: PhantomData,
}
}
/// Returns the start address of the frame.
pub fn start_address(&self) -> PhysAddr {
self.start_address
}
/// Returns the size of the frame (4KiB, 2MiB or 1GiB).
pub fn size(&self) -> u64 {
S::SIZE
}
/// Returns a range of frames, exclusive of `end`.
pub fn range(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRange<S> {
PhysFrameRange { start, end }
}
/// Returns a range of frames, inclusive of `end`.
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
}
impl<S: PageSize> fmt::Debug for PhysFrame<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_fmt(format_args!(
"PhysFrame[{}]({:#x})",
S::SIZE_AS_DEBUG_STR,
self.start_address().as_u64()
))
}
}
impl<S: PageSize> Add<u64> for PhysFrame<S> {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() + rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> AddAssign<u64> for PhysFrame<S> {
fn add_assign(&mut self, rhs: u64) {
*self = self.clone() + rhs;
}
}
impl<S: PageSize> Sub<u64> for PhysFrame<S> {
type Output = Self;
fn sub(self, rhs: u64) -> Self::Output {
PhysFrame::containing_address(self.start_address() - rhs * u64::from(S::SIZE))
}
}
impl<S: PageSize> SubAssign<u64> for PhysFrame<S> {
fn sub_assign(&mut self, rhs: u64) {
*self = self.clone() - rhs;
}
}
impl<S: PageSize> Sub<PhysFrame<S>> for PhysFrame<S> {
type Output = u64;
fn sub(self, rhs: PhysFrame<S>) -> Self::Output {
(self.start_address - rhs.start_address) / S::SIZE
}
}
/// A range of physical memory frames, with an exclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRange<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, exclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRange<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start < self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRange<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start < self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl From<os_bootinfo::FrameRange> for PhysFrameRange {
fn from(range: os_bootinfo::FrameRange) -> Self {
PhysFrameRange {
start: PhysFrame::from_start_address(PhysAddr::new(range.start_addr())).unwrap(),
end: PhysFrame::from_start_address(PhysAddr::new(range.end_addr())).unwrap(),
}
}
}
impl Into<os_bootinfo::FrameRange> for PhysFrameRange {
fn into(self) -> os_bootinfo::FrameRange {
os_bootinfo::FrameRange::new(
self.start.start_address().as_u64(),
self.end.start_address().as_u64(),
)
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRange")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
/// A range of physical memory frames, with an inclusive upper bound.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(C)]
pub struct PhysFrameRangeInclusive<S: PageSize = Size4KiB> {
/// The start of the range, inclusive.
pub start: PhysFrame<S>,
/// The end of the range, inclusive.
pub end: PhysFrame<S>,
}
impl<S: PageSize> PhysFrameRangeInclusive<S> {
/// Returns whether the range contains no frames.
pub fn is_empty(&self) -> bool {
!(self.start <= self.end)
}
}
impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
type Item = PhysFrame<S>;
fn next(&mut self) -> Option<Self::Item> {
if self.start <= self.end {
let frame = self.start.clone();
self.start += 1;
Some(frame)
} else {
None
}
}
}
impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PhysFrameRangeInclusive")
.field("start", &self.start)
.field("end", &self.end)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
pub fn test_page_ranges() {
let page_size = Size4KiB::SIZE;
let number = 1000;
let start_addr = VirtAddr::new(0xdeadbeaf);
let start: Page = Page::containing_address(start_addr);
let end = start.clone() + number;
let mut range = Page::range(start.clone(), end.clone());
for i in 0..number {
assert_eq!(
range.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range.next(), None);
let mut range_inclusive = Page::range_inclusive(start, end);
for i in 0..=number {
assert_eq!(
range_inclusive.next(),
Some(Page::containing_address(start_addr + page_size * i))
);
}
assert_eq!(range_inclusive.next(), None);
}
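// A hedged companion test (not part of the original commit): building a page
// from explicit table indices and reading them back should round-trip.
#[test]
pub fn test_page_table_indices_roundtrip() {
let page = Page::from_page_table_indices(u9::new(1), u9::new(2), u9::new(3), u9::new(4));
assert_eq!(u16::from(page.p4_index()), 1);
assert_eq!(u16::from(page.p3_index()), 2);
assert_eq!(u16::from(page.p2_index()), 3);
assert_eq!(u16::from(page.p1_index()), 4);
}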
}

@@ -0,0 +1,185 @@
use core::fmt;
use core::ops::{Index, IndexMut};
use super::{PageSize, PhysFrame, Size4KiB};
use addr::PhysAddr;
use usize_conversions::usize_from;
use ux::*;
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
/// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}
/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
entry: u64,
}
impl PageTableEntry {
/// Returns whether this entry is zero.
pub fn is_unused(&self) -> bool {
self.entry == 0
}
/// Sets this entry to zero.
pub fn set_unused(&mut self) {
self.entry = 0;
}
/// Returns the flags of this entry.
pub fn flags(&self) -> PageTableFlags {
PageTableFlags::from_bits_truncate(self.entry)
}
/// Returns the physical address mapped by this entry, might be zero.
pub fn addr(&self) -> PhysAddr {
PhysAddr::new(self.entry & 0x000fffff_fffff000)
}
/// Returns the physical frame mapped by this entry.
///
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if !self.flags().contains(PageTableFlags::PRESENT) {
Err(FrameError::FrameNotPresent)
} else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
/// Map the entry to the specified physical address with the specified flags.
pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
assert!(addr.is_aligned(Size4KiB::SIZE));
self.entry = (addr.as_u64()) | flags.bits();
}
/// Map the entry to the specified physical frame with the specified flags.
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
/// Sets the flags of this entry.
pub fn set_flags(&mut self, flags: PageTableFlags) {
self.entry = self.addr().as_u64() | flags.bits();
}
}
impl fmt::Debug for PageTableEntry {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut f = f.debug_struct("PageTableEntry");
f.field("addr", &self.addr());
f.field("flags", &self.flags());
f.finish()
}
}
bitflags! {
/// Possible flags for a page table entry.
pub struct PageTableFlags: u64 {
const ALL = 0xffffffff_ffffffff;
const TYPE_MASK = 3 << 0;
const TYPE_FAULT = 0 << 0;
const TYPE_PAGE = 3 << 0;
const TABLE_BIT = 1 << 1;
const PRESENT = 1 << 0;
const USER_ACCESSIBLE = 1 << 6; /* AP[1] */
const RDONLY = 1 << 7; /* AP[2] */
const SHARED = 3 << 8; /* SH[1:0], inner shareable */
const BIT_8 = 1 << 8;
const BIT_9 = 1 << 9;
/*
pub const ATTRIB_SH_NON_SHAREABLE: usize = 0x0 << 8;
pub const ATTRIB_SH_OUTER_SHAREABLE: usize = 0x2 << 8;
pub const ATTRIB_SH_INNER_SHAREABLE: usize = 0x3 << 8;
*/
const ACCESSED = 1 << 10; /* AF, Access Flag */
const NONE_GLOBAL = 1 << 11; /* nG, not global */
const GLOBAL = (!(1 << 11));
const DBM = 1 << 51; /* Dirty Bit Management */
const WRITE = 1 << 51; /* DBM */
const CONT = 1 << 52; /* Contiguous range */
const PXN = 1 << 53; /* Privileged XN */
const UXN = 1 << 54; /* User XN */
const HYP_XN = 1 << 54; /* HYP XN */
const DIRTY = 1 << 55;
const SWAPPED = 1 << 56;
const HUGE_PAGE = 1 << 57;
const PROT_NONE = 1 << 58;
}
}
/// The number of entries in a page table.
const ENTRY_COUNT: usize = 512;
/// Represents a page table.
///
/// Always page-sized.
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 15th page table entry.
#[repr(transparent)]
pub struct PageTable {
entries: [PageTableEntry; ENTRY_COUNT],
}
impl PageTable {
/// Clears all entries.
pub fn zero(&mut self) {
for entry in self.entries.iter_mut() {
entry.set_unused();
}
}
}
impl Index<usize> for PageTable {
type Output = PageTableEntry;
fn index(&self, index: usize) -> &Self::Output {
&self.entries[index]
}
}
impl IndexMut<usize> for PageTable {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
&mut self.entries[index]
}
}
impl Index<u9> for PageTable {
type Output = PageTableEntry;
fn index(&self, index: u9) -> &Self::Output {
&self.entries[usize_from(u16::from(index))]
}
}
impl IndexMut<u9> for PageTable {
fn index_mut(&mut self, index: u9) -> &mut Self::Output {
&mut self.entries[usize_from(u16::from(index))]
}
}
impl fmt::Debug for PageTable {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.entries[..].fmt(f)
}
}
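#[cfg(test)]
mod tests {
use super::*;
// A hedged companion test (not part of the original commit): an entry packs a
// 4KiB-aligned physical address and the flag bits into a single u64, so both
// must survive a set/read round trip.
#[test]
fn test_entry_roundtrip() {
let mut entry = PageTableEntry { entry: 0 };
let addr = PhysAddr::new(0x1234_5000);
let flags = PageTableFlags::PRESENT | PageTableFlags::ACCESSED;
entry.set_addr(addr, flags);
assert_eq!(entry.addr(), addr);
assert!(entry.flags().contains(flags));
}
}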

@@ -0,0 +1,623 @@
#![cfg(target_arch = "aarch64")]
use asm::tlb_invalidate;
use paging::{
frame_alloc::FrameAllocator,
page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
NotGiantPageSize, Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
};
use paging::page_table::PageTableFlags as Flags;
use asm::ttbr0_el1_read;
use ux::u9;
use addr::{PhysAddr, VirtAddr};
/// This type represents a page whose mapping has changed in the page table.
///
/// The old mapping might be still cached in the translation lookaside buffer (TLB), so it needs
/// to be flushed from the TLB before it's accessed. This type is returned from functions that
/// change the mapping of a page to ensure that the TLB flush is not forgotten.
#[derive(Debug)]
#[must_use = "Page Table changes must be flushed or ignored."]
pub struct MapperFlush<S: PageSize>(Page<S>);
impl<S: PageSize> MapperFlush<S> {
/// Create a new flush promise
fn new(page: Page<S>) -> Self {
MapperFlush(page)
}
/// Flush the page from the TLB to ensure that the newest mapping is used.
pub fn flush(self) {
tlb_invalidate();
}
/// Don't flush the TLB and silence the “must be used” warning.
pub fn ignore(self) {}
}
/// A trait for common page table operations.
pub trait Mapper<S: PageSize> {
/// Creates a new mapping in the page table.
///
/// This function might need additional physical frames to create new page tables. These
/// frames are allocated from the `allocator` argument. At most three frames are required.
fn map_to<A>(
&mut self,
page: Page<S>,
frame: PhysFrame<S>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError>
where
A: FrameAllocator<Size4KiB>;
/// Removes a mapping from the page table and returns the frame that used to be mapped.
///
/// Note that no page tables or pages are deallocated.
fn unmap(&mut self, page: Page<S>) -> Result<(PhysFrame<S>, MapperFlush<S>), UnmapError>;
/// Updates the flags of an existing mapping.
fn update_flags(
&mut self,
page: Page<S>,
flags: PageTableFlags,
) -> Result<MapperFlush<S>, FlagUpdateError>;
/// Return the frame that the specified page is mapped to.
fn translate_page(&self, page: Page<S>) -> Option<PhysFrame<S>>;
/// Maps the given frame to the virtual page with the same address.
fn identity_map<A>(
&mut self,
frame: PhysFrame<S>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<S>, MapToError>
where
A: FrameAllocator<Size4KiB>,
S: PageSize,
Self: Mapper<S>,
{
let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
self.map_to(page, frame, flags, allocator)
}
}
/// A recursive page table is a last level page table with an entry mapped to the table itself.
///
/// This recursive mapping allows accessing all page tables in the hierarchy:
///
/// - To access the level 4 page table, we “loop” (i.e. follow the recursively mapped entry) four
/// times.
/// - To access a level 3 page table, we “loop” three times and then use the level 4 index.
/// - To access a level 2 page table, we “loop” two times, then use the level 4 index, then the
/// level 3 index.
/// - To access a level 1 page table, we “loop” once, then use the level 4 index, then the
/// level 3 index, then the level 2 index.
///
/// This struct implements the `Mapper` trait.
#[derive(Debug)]
pub struct RecursivePageTable<'a> {
p4: &'a mut PageTable,
recursive_index: u9,
}
/// An error indicating that the given page table is not recursively mapped.
///
/// Returned from `RecursivePageTable::new`.
#[derive(Debug)]
pub struct NotRecursivelyMapped;
/// This error is returned from `map_to` and similar methods.
#[derive(Debug)]
pub enum MapToError {
/// An additional frame was needed for the mapping process, but the frame allocator
/// returned `None`.
FrameAllocationFailed,
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of an already mapped huge page.
ParentEntryHugePage,
/// The given page is already mapped to a physical frame.
PageAlreadyMapped,
}
/// An error indicating that an `unmap` call failed.
#[derive(Debug)]
pub enum UnmapError {
/// An upper level page table entry has the `HUGE_PAGE` flag set, which means that the
/// given page is part of a huge page and can't be freed individually.
ParentEntryHugePage,
/// The given page is not mapped to a physical frame.
PageNotMapped,
/// The page table entry for the given page points to an invalid physical address.
InvalidFrameAddress(PhysAddr),
}
/// An error indicating that an `update_flags` call failed.
#[derive(Debug)]
pub enum FlagUpdateError {
/// The given page is not mapped to a physical frame.
PageNotMapped,
}
impl<'a> RecursivePageTable<'a> {
/// Creates a new RecursivePageTable from the passed level 4 PageTable.
///
/// The page table must be recursively mapped, which means that:
///
/// - The page table must have one recursive entry, i.e. an entry that points to the table
/// itself.
/// - The reference must use that “loop”, i.e. be of the form `0o_xxx_xxx_xxx_xxx_0000`
/// where `xxx` is the recursive entry.
/// - The page table must be active, i.e. the TTBR0_EL1 register must contain its physical address.
///
/// Otherwise `Err(NotRecursivelyMapped)` is returned.
pub fn new(table: &'a mut PageTable) -> Result<Self, NotRecursivelyMapped> {
let page = Page::containing_address(VirtAddr::new(table as *const _ as u64));
let recursive_index = page.p4_index();
if page.p3_index() != recursive_index
|| page.p2_index() != recursive_index
|| page.p1_index() != recursive_index
{
return Err(NotRecursivelyMapped);
}
if Ok(ttbr0_el1_read().0) != table[recursive_index].frame() {
return Err(NotRecursivelyMapped);
}
Ok(RecursivePageTable {
p4: table,
recursive_index,
})
}
/// Creates a new RecursivePageTable without performing any checks.
///
/// The `recursive_index` parameter must be the index of the recursively mapped entry.
pub unsafe fn new_unchecked(table: &'a mut PageTable, recursive_index: u9) -> Self {
RecursivePageTable {
p4: table,
recursive_index,
}
}
/// Internal helper function to create the page table of the next level if needed.
///
/// If the passed entry is unused, a new frame is allocated from the given allocator, zeroed,
/// and the entry is updated to that address. If the passed entry is already mapped, the next
/// table is returned directly.
///
/// The `next_page_table` page must be the page of the next page table in the hierarchy.
///
/// Returns `MapToError::FrameAllocationFailed` if the entry is unused and the allocator
/// returned `None`. Returns `MapToError::ParentEntryHugePage` if the `HUGE_PAGE` flag is set
/// in the passed entry.
unsafe fn create_next_table<'b, A>(
entry: &'b mut PageTableEntry,
next_table_page: Page,
allocator: &mut A,
) -> Result<&'b mut PageTable, MapToError>
where
A: FrameAllocator<Size4KiB>,
{
/// This inner function is used to limit the scope of `unsafe`.
///
/// This is a safe function, so we need to use `unsafe` blocks when we do something unsafe.
fn inner<'b, A>(
entry: &'b mut PageTableEntry,
next_table_page: Page,
allocator: &mut A,
) -> Result<&'b mut PageTable, MapToError>
where
A: FrameAllocator<Size4KiB>,
{
let created;
if entry.is_unused() {
if let Some(frame) = allocator.alloc() {
entry.set_frame(frame, Flags::PRESENT | Flags::WRITE);
created = true;
} else {
return Err(MapToError::FrameAllocationFailed);
}
} else {
created = false;
}
if entry.flags().contains(Flags::HUGE_PAGE) {
return Err(MapToError::ParentEntryHugePage);
}
let page_table_ptr = next_table_page.start_address().as_mut_ptr();
let page_table: &mut PageTable = unsafe { &mut *(page_table_ptr) };
if created {
page_table.zero();
}
Ok(page_table)
}
inner(entry, next_table_page, allocator)
}
}
impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size1GiB>,
frame: PhysFrame<Size1GiB>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size1GiB>, MapToError>
where
A: FrameAllocator<Size4KiB>,
{
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = unsafe { Self::create_next_table(&mut p4[page.p4_index()], p3_page, allocator)? };
if !p3[page.p3_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped);
}
p3[page.p3_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap(
&mut self,
page: Page<Size1GiB>,
) -> Result<(PhysFrame<Size1GiB>, MapperFlush<Size1GiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &mut p3[page.p3_index()];
let flags = p3_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
let frame = PhysFrame::from_start_address(p3_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddress(p3_entry.addr()))?;
p3_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
fn update_flags(
&mut self,
page: Page<Size1GiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size1GiB>, FlagUpdateError> {
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p3[page.p3_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size1GiB>) -> Option<PhysFrame<Size1GiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p3_entry.addr()).ok()
}
}
impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size2MiB>,
frame: PhysFrame<Size2MiB>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size2MiB>, MapToError>
where
A: FrameAllocator<Size4KiB>,
{
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = unsafe { Self::create_next_table(&mut p4[page.p4_index()], p3_page, allocator)? };
let p2_page = p2_page(page, self.recursive_index);
let p2 = unsafe { Self::create_next_table(&mut p3[page.p3_index()], p2_page, allocator)? };
if !p2[page.p2_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped);
}
p2[page.p2_index()].set_addr(frame.start_address(), flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn unmap(
&mut self,
page: Page<Size2MiB>,
) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &mut p2[page.p2_index()];
let flags = p2_entry.flags();
if !flags.contains(PageTableFlags::PRESENT) {
return Err(UnmapError::PageNotMapped);
}
if !flags.contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
let frame = PhysFrame::from_start_address(p2_entry.addr())
.map_err(|()| UnmapError::InvalidFrameAddress(p2_entry.addr()))?;
p2_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
fn update_flags(
&mut self,
page: Page<Size2MiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size2MiB>, FlagUpdateError> {
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p2[page.p2_index()].set_flags(flags | Flags::HUGE_PAGE);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size2MiB>) -> Option<PhysFrame<Size2MiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p2_entry.addr()).ok()
}
}
impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
fn map_to<A>(
&mut self,
page: Page<Size4KiB>,
frame: PhysFrame<Size4KiB>,
flags: PageTableFlags,
allocator: &mut A,
) -> Result<MapperFlush<Size4KiB>, MapToError>
where
A: FrameAllocator<Size4KiB>,
{
let p4 = &mut self.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3 = unsafe { Self::create_next_table(&mut p4[page.p4_index()], p3_page, allocator)? };
let p2_page = p2_page(page, self.recursive_index);
let p2 = unsafe { Self::create_next_table(&mut p3[page.p3_index()], p2_page, allocator)? };
let p1_page = p1_page(page, self.recursive_index);
let p1 = unsafe { Self::create_next_table(&mut p2[page.p2_index()], p1_page, allocator)? };
if !p1[page.p1_index()].is_unused() {
return Err(MapToError::PageAlreadyMapped);
}
p1[page.p1_index()].set_frame(frame, flags);
Ok(MapperFlush::new(page))
}
fn unmap(
&mut self,
page: Page<Size4KiB>,
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
p2_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1_entry = &mut p1[page.p1_index()];
let frame = p1_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
p1_entry.set_unused();
Ok((frame, MapperFlush::new(page)))
}
fn update_flags(
&mut self,
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
let p4 = &mut self.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
if p1[page.p1_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
p1[page.p1_index()].set_flags(flags);
Ok(MapperFlush::new(page))
}
fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
let p4 = &self.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
let p1 = unsafe { &*(p1_ptr(page, self.recursive_index)) };
let p1_entry = &p1[page.p1_index()];
if p1_entry.is_unused() {
return None;
}
PhysFrame::from_start_address(p1_entry.addr()).ok()
}
}
fn p3_ptr<S: PageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p3_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p3_page<S: PageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
recursive_index,
page.p4_index(),
)
}
fn p2_ptr<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p2_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p2_page<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
page.p4_index(),
page.p3_index(),
)
}
fn p1_ptr(page: Page<Size4KiB>, recursive_index: u9) -> *mut PageTable {
p1_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p1_page(page: Page<Size4KiB>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
page.p4_index(),
page.p3_index(),
page.p2_index(),
)
}
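/// A hedged usage sketch (not part of the original commit): map a single 4KiB
/// page and honor the `#[must_use]` flush. The two addresses below are
/// illustrative placeholders.
pub fn map_one_example<A>(
table: &mut RecursivePageTable,
allocator: &mut A,
) -> Result<(), MapToError>
where
A: FrameAllocator<Size4KiB>,
{
let page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(0xdead_b000));
let frame = PhysFrame::containing_address(PhysAddr::new(0x8_0000));
let flags = Flags::PRESENT | Flags::ACCESSED;
// `map_to` may allocate up to three intermediate tables from `allocator`.
table.map_to(page, frame, flags, allocator)?.flush();
Ok(())
}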

@@ -0,0 +1,31 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Frequency register - EL0
//!
//! This register is provided so that software can discover the frequency of the
//! system counter. It must be programmed with this value as part of system
//! initialization. The value of the register is not interpreted by hardware.
use register::cpu::RegisterReadOnly;
pub struct Reg;
impl RegisterReadOnly<u32, ()> for Reg {
sys_coproc_read_raw!(u32, "CNTFRQ_EL0");
}
pub static CNTFRQ_EL0: Reg = Reg {};

@@ -0,0 +1,75 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Hypervisor Control register - EL2
//!
//! Controls the generation of an event stream from the physical counter, and
//! access from Non-secure EL1 to the physical counter and the Non-secure EL1
//! physical timer.
use register::cpu::RegisterReadWrite;
// When HCR_EL2.E2H == 0:
// TODO: Figure out how we can differentiate depending on HCR_EL2.E2H state
//
// For now, implement the HCR_EL2.E2H == 0 version
register_bitfields! {u32,
CNTHCTL_EL2 [
/// Traps Non-secure EL0 and EL1 accesses to the physical timer
/// registers to EL2.
///
/// 0 From AArch64 state: Non-secure EL0 and EL1 accesses to the
/// CNTP_CTL_EL0, CNTP_CVAL_EL0, and CNTP_TVAL_EL0 are trapped to EL2,
/// unless it is trapped by CNTKCTL_EL1.EL0PTEN.
///
/// From AArch32 state: Non-secure EL0 and EL1 accesses to the
/// CNTP_CTL, CNTP_CVAL, and CNTP_TVAL are trapped to EL2, unless it
/// is trapped by CNTKCTL_EL1.EL0PTEN or CNTKCTL.PL0PTEN.
///
/// 1 This control does not cause any instructions to be trapped.
///
/// If EL3 is implemented and EL2 is not implemented, behavior is as if
/// this bit is 1 other than for the purpose of a direct read.
EL1PCEN OFFSET(1) NUMBITS(1) [],
/// Traps Non-secure EL0 and EL1 accesses to the physical counter
/// register to EL2.
///
/// 0 From AArch64 state: Non-secure EL0 and EL1 accesses to the
/// CNTPCT_EL0 are trapped to EL2, unless it is trapped by
/// CNTKCTL_EL1.EL0PCTEN.
///
/// From AArch32 state: Non-secure EL0 and EL1 accesses to the CNTPCT
/// are trapped to EL2, unless it is trapped by CNTKCTL_EL1.EL0PCTEN
/// or CNTKCTL.PL0PCTEN.
///
/// 1 This control does not cause any instructions to be trapped.
///
/// If EL3 is implemented and EL2 is not implemented, behavior is as if
/// this bit is 1 other than for the purpose of a direct read.
EL1PCTEN OFFSET(0) NUMBITS(1) []
]
}
pub struct Reg;
impl RegisterReadWrite<u32, CNTHCTL_EL2::Register> for Reg {
sys_coproc_read_raw!(u32, "CNTHCTL_EL2");
sys_coproc_write_raw!(u32, "CNTHCTL_EL2");
}
#[allow(non_upper_case_globals)]
pub static CNTHCTL_EL2: Reg = Reg {};
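// A hedged usage sketch (not part of the original commit): EL2 setup code
// usually sets both bits so that Non-secure EL1/EL0 can use the physical
// counter and timer without trapping, e.g.:
//
//     CNTHCTL_EL2.write(CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET);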

@@ -0,0 +1,62 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Timer Control register - EL0
//!
//! Control register for the EL1 physical timer.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
CNTP_CTL_EL0 [
/// The status of the timer. This bit indicates whether the timer
/// condition is met:
///
/// 0 Timer condition is not met.
/// 1 Timer condition is met.
///
/// When the value of the ENABLE bit is 1, ISTATUS indicates whether the
/// timer condition is met. ISTATUS takes no account of the value of the
/// IMASK bit. If the value of ISTATUS is 1 and the value of IMASK is 0
/// then the timer interrupt is asserted.
///
/// When the value of the ENABLE bit is 0, the ISTATUS field is UNKNOWN.
///
/// This bit is read-only.
ISTATUS OFFSET(2) NUMBITS(1) [],
/// Timer interrupt mask bit. Permitted values are:
///
/// 0 Timer interrupt is not masked by the IMASK bit.
/// 1 Timer interrupt is masked by the IMASK bit.
IMASK OFFSET(1) NUMBITS(1) [],
/// Enables the timer. Permitted values are:
///
/// 0 Timer disabled.
/// 1 Timer enabled.
ENABLE OFFSET(0) NUMBITS(1) []
]
}
pub struct Reg;
impl RegisterReadWrite<u32, CNTP_CTL_EL0::Register> for Reg {
sys_coproc_read_raw!(u32, "CNTP_CTL_EL0");
sys_coproc_write_raw!(u32, "CNTP_CTL_EL0");
}
pub static CNTP_CTL_EL0: Reg = Reg {};
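// Illustrative sketch (not part of this commit): arm the EL1 physical timer
// for a one-shot expiry by programming the TimerValue register and then
// enabling the timer unmasked.
use super::cntp_tval_el0::CNTP_TVAL_EL0; // assumed sibling module path
pub fn arm_timer(ticks: u32) {
    CNTP_TVAL_EL0.set(ticks); // interrupt fires after `ticks` counter increments
    CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE.val(1) + CNTP_CTL_EL0::IMASK.val(0));
}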

@ -0,0 +1,30 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Timer TimerValue register - EL0
//!
//! Holds the timer value for the EL1 physical timer.
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u32, ()> for Reg {
sys_coproc_read_raw!(u32, "CNTP_TVAL_EL0");
sys_coproc_write_raw!(u32, "CNTP_TVAL_EL0");
}
pub static CNTP_TVAL_EL0: Reg = Reg {};

@ -0,0 +1,29 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Physical Count register - EL0
//!
//! Holds the 64-bit physical count value.
use register::cpu::RegisterReadOnly;
pub struct Reg;
impl RegisterReadOnly<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "CNTPCT_EL0");
}
pub static CNTPCT_EL0: Reg = Reg {};
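// Illustrative sketch (not part of this commit): a busy-wait delay built on
// the monotonic physical count; `wrapping_sub` keeps the comparison correct
// across the (theoretical) 64-bit rollover.
pub fn spin_for_ticks(ticks: u64) {
    let start = CNTPCT_EL0.get();
    while CNTPCT_EL0.get().wrapping_sub(start) < ticks {}
}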

@ -0,0 +1,32 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Counter-timer Virtual Offset register - EL2
//!
//! Holds the 64-bit virtual offset. This is the offset between the physical
//! count value visible in CNTPCT_EL0 and the virtual count value visible in
//! CNTVCT_EL0.
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "CNTVOFF_EL2");
sys_coproc_write_raw!(u64, "CNTVOFF_EL2");
}
pub static CNTVOFF_EL2: Reg = Reg {};
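// Illustrative sketch (not part of this commit): hypervisors that do not
// virtualize time typically zero the offset before entering EL1, so that the
// virtual count in CNTVCT_EL0 equals the physical count in CNTPCT_EL0.
pub fn zero_virtual_offset() {
    CNTVOFF_EL2.set(0);
}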

@ -0,0 +1,52 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Current Exception Level
//!
//! Holds the current Exception level.
use register::cpu::RegisterReadOnly;
register_bitfields! {u32,
CurrentEL [
/// Current Exception level. Possible values of this field are:
///
/// 00 EL0
/// 01 EL1
/// 10 EL2
/// 11 EL3
///
/// When the HCR_EL2.NV bit is 1, Non-secure EL1 read accesses to the
/// CurrentEL register return the value of 0x2 in this field.
///
/// This field resets to a value that is architecturally UNKNOWN.
EL OFFSET(2) NUMBITS(2) [
EL0 = 0,
EL1 = 1,
EL2 = 2,
EL3 = 3
]
]
}
pub struct Reg;
impl RegisterReadOnly<u32, CurrentEL::Register> for Reg {
sys_coproc_read_raw!(u32, "CurrentEL");
}
#[allow(non_upper_case_globals)]
pub static CurrentEL: Reg = Reg {};
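// Illustrative sketch (not part of this commit): boot code often branches on
// the Exception level it was entered at; `read` extracts the two EL bits
// defined above (0 through 3).
pub fn exception_level() -> u32 {
    CurrentEL.read(CurrentEL::EL)
}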

@ -0,0 +1,90 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Interrupt Mask Bits
//!
//! Allows access to the interrupt mask bits.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
DAIF [
/// Process state D mask. The possible values of this bit are:
///
/// 0 Watchpoint, Breakpoint, and Software Step exceptions targeted at
/// the current Exception level are not masked.
///
/// 1 Watchpoint, Breakpoint, and Software Step exceptions targeted at
/// the current Exception level are masked.
///
/// When the target Exception level of the debug exception is higher
/// than the current Exception level, the exception is not masked by
/// this bit.
///
/// When this register has an architecturally-defined reset value, this
/// field resets to 1.
D OFFSET(9) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// SError interrupt mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
///
/// When this register has an architecturally-defined reset value, this
    /// field resets to 1.
A OFFSET(8) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// IRQ mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
///
/// When this register has an architecturally-defined reset value, this
    /// field resets to 1.
I OFFSET(7) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// FIQ mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
///
/// When this register has an architecturally-defined reset value, this
    /// field resets to 1.
F OFFSET(6) NUMBITS(1) [
Unmasked = 0,
Masked = 1
]
]
}
pub struct Reg;
impl RegisterReadWrite<u32, DAIF::Register> for Reg {
sys_coproc_read_raw!(u32, "DAIF");
sys_coproc_write_raw!(u32, "DAIF");
}
pub static DAIF: Reg = Reg {};
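// Illustrative sketch (not part of this commit): toggle the IRQ mask with a
// read-modify-write so the D, A and F bits keep their current values.
pub fn disable_irqs() {
    DAIF.modify(DAIF::I::Masked);
}

pub fn enable_irqs() {
    DAIF.modify(DAIF::I::Unmasked);
}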

@ -0,0 +1,30 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Exception Link Register - EL2
//!
//! When taking an exception to EL2, holds the address to return to.
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "ELR_EL2");
sys_coproc_write_raw!(u64, "ELR_EL2");
}
pub static ELR_EL2: Reg = Reg {};

@ -0,0 +1,123 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Hypervisor Configuration Register - EL2
//!
//! Provides configuration controls for virtualization, including defining
//! whether various Non-secure operations are trapped to EL2.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
HCR_EL2 [
/// Execution state control for lower Exception levels:
///
/// 0 Lower levels are all AArch32.
/// 1 The Execution state for EL1 is AArch64. The Execution state for
/// EL0 is determined by the current value of PSTATE.nRW when
/// executing at EL0.
///
/// If all lower Exception levels cannot use AArch32 then this bit is
/// RAO/WI.
///
/// In an implementation that includes EL3, when SCR_EL3.NS==0, the PE
/// behaves as if this bit has the same value as the SCR_EL3.RW bit for
/// all purposes other than a direct read or write access of HCR_EL2.
///
/// The RW bit is permitted to be cached in a TLB.
///
/// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
/// is {1, 1}, this field behaves as 1 for all purposes other than a
/// direct read of the value of this bit.
RW OFFSET(31) NUMBITS(1) [
AllLowerELsAreAarch32 = 0,
EL1IsAarch64 = 1
],
/// Default Cacheability.
///
/// 0 This control has no effect on the Non-secure EL1&0 translation
/// regime.
///
/// 1 In Non-secure state:
/// - When EL1 is using AArch64, the PE behaves as if the value of
/// the SCTLR_EL1.M field is 0 for all purposes other than
/// returning the value of a direct read of SCTLR_EL1.
///
/// - When EL1 is using AArch32, the PE behaves as if the value of
/// the SCTLR.M field is 0 for all purposes other than returning
/// the value of a direct read of SCTLR.
///
/// - The PE behaves as if the value of the HCR_EL2.VM field is 1
/// for all purposes other than returning the value of a direct
/// read of HCR_EL2.
///
/// - The memory type produced by stage 1 of the EL1&0 translation
/// regime is Normal Non-Shareable, Inner Write-Back Read-Allocate
/// Write-Allocate, Outer Write-Back Read-Allocate Write-Allocate.
///
/// This field has no effect on the EL2, EL2&0, and EL3 translation
/// regimes.
///
/// This field is permitted to be cached in a TLB.
///
/// In an implementation that includes EL3, when the value of SCR_EL3.NS
/// is 0 the PE behaves as if this field is 0 for all purposes other
/// than a direct read or write access of HCR_EL2.
///
/// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
/// is {1, 1}, this field behaves as 0 for all purposes other than a
/// direct read of the value of this field.
DC OFFSET(12) NUMBITS(1) [],
/// Set/Way Invalidation Override. Causes Non-secure EL1 execution of
/// the data cache invalidate by set/way instructions to perform a data
/// cache clean and invalidate by set/way:
///
/// 0 This control has no effect on the operation of data cache
/// invalidate by set/way instructions.
///
/// 1 Data cache invalidate by set/way instructions perform a data cache
/// clean and invalidate by set/way.
///
/// When the value of this bit is 1:
///
/// AArch32: DCISW performs the same invalidation as a DCCISW
/// instruction.
///
/// AArch64: DC ISW performs the same invalidation as a DC CISW
/// instruction.
///
        /// This bit can be implemented as RES1.
///
/// In an implementation that includes EL3, when the value of SCR_EL3.NS
/// is 0 the PE behaves as if this field is 0 for all purposes other
/// than a direct read or write access of HCR_EL2.
///
/// When HCR_EL2.TGE is 1, the PE ignores the value of this field for
/// all purposes other than a direct read of this field.
SWIO OFFSET(1) NUMBITS(1) []
]
}
pub struct Reg;
impl RegisterReadWrite<u64, HCR_EL2::Register> for Reg {
sys_coproc_read_raw!(u64, "HCR_EL2");
sys_coproc_write_raw!(u64, "HCR_EL2");
}
pub static HCR_EL2: Reg = Reg {};
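// Illustrative sketch (not part of this commit): before an EL2 -> EL1 drop,
// select AArch64 as the Execution state for the lower levels; the other
// HCR_EL2 controls are left at their current values.
pub fn set_el1_aarch64() {
    HCR_EL2.modify(HCR_EL2::RW::EL1IsAarch64);
}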

@ -0,0 +1,82 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! AArch64 Memory Model Feature Register 0 - EL1
//!
//! Provides information about the implemented memory model and memory
//! management support in AArch64 state.
use register::cpu::RegisterReadOnly;
register_bitfields! {u64,
ID_AA64MMFR0_EL1 [
/// Support for 4KiB memory translation granule size. Defined values
/// are:
///
/// 0000 4KiB granule supported.
/// 1111 4KiB granule not supported.
///
/// All other values are reserved.
TGran4 OFFSET(28) NUMBITS(4) [
Supported = 0b0000,
NotSupported = 0b1111
],
/// Support for 64KiB memory translation granule size. Defined values
/// are:
///
/// 0000 64KiB granule supported.
/// 1111 64KiB granule not supported.
///
/// All other values are reserved.
TGran64 OFFSET(24) NUMBITS(4) [
Supported = 0b0000,
NotSupported = 0b1111
],
/// Physical Address range supported. Defined values are:
///
/// 0000 32 bits, 4GiB.
/// 0001 36 bits, 64GiB.
/// 0010 40 bits, 1TiB.
/// 0011 42 bits, 4TiB.
/// 0100 44 bits, 16TiB.
/// 0101 48 bits, 256TiB.
/// 0110 52 bits, 4PiB.
///
/// All other values are reserved.
///
/// The value 0110 is permitted only if the implementation includes
/// ARMv8.2-LPA, otherwise it is reserved.
PARange OFFSET(0) NUMBITS(4) [
Bits_32 = 0b0000,
Bits_36 = 0b0001,
Bits_40 = 0b0010,
Bits_42 = 0b0011,
Bits_44 = 0b0100,
Bits_48 = 0b0101,
Bits_52 = 0b0110
]
]
}
pub struct Reg;
impl RegisterReadOnly<u64, ID_AA64MMFR0_EL1::Register> for Reg {
sys_coproc_read_raw!(u64, "ID_AA64MMFR0_EL1");
}
pub static ID_AA64MMFR0_EL1: Reg = Reg {};
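// Illustrative sketch (not part of this commit): paging setup can gate on the
// granule-support fields; per the encoding above, 0b0000 means the 4KiB
// granule is implemented.
pub fn supports_4kib_granule() -> bool {
    ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::TGran4) == 0
}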

@ -0,0 +1,85 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
macro_rules! __read_raw {
($width:ty, $asm_instr:tt, $asm_reg_name:tt) => {
/// Reads the raw bits of the CPU register.
#[inline]
fn get(&self) -> $width {
match () {
#[cfg(target_arch = "aarch64")]
() => {
let reg;
unsafe {
asm!(concat!($asm_instr, " $0, ", $asm_reg_name) : "=r"(reg) ::: "volatile");
}
reg
}
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
};
}
macro_rules! __write_raw {
($width:ty, $asm_instr:tt, $asm_reg_name:tt) => {
/// Writes raw bits to the CPU register.
#[cfg_attr(not(target_arch = "aarch64"), allow(unused_variables))]
#[inline]
fn set(&self, value: $width) {
match () {
#[cfg(target_arch = "aarch64")]
() => {
unsafe {
asm!(concat!($asm_instr, " ", $asm_reg_name, ", $0") :: "r"(value) :: "volatile")
}
}
#[cfg(not(target_arch = "aarch64"))]
() => unimplemented!(),
}
}
};
}
/// Raw read from system coprocessor registers
macro_rules! sys_coproc_read_raw {
($width:ty, $asm_reg_name:tt) => {
__read_raw!($width, "mrs", $asm_reg_name);
};
}
/// Raw write to system coprocessor registers
macro_rules! sys_coproc_write_raw {
($width:ty, $asm_reg_name:tt) => {
__write_raw!($width, "msr", $asm_reg_name);
};
}
/// Raw read from (ordinary) registers
macro_rules! read_raw {
($width:ty, $asm_reg_name:tt) => {
__read_raw!($width, "mov", $asm_reg_name);
};
}
/// Raw write to (ordinary) registers
macro_rules! write_raw {
($width:ty, $asm_reg_name:tt) => {
__write_raw!($width, "mov", $asm_reg_name);
};
}
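// Usage note (added for clarity): these macros are invoked inside the
// `impl RegisterReadOnly`/`impl RegisterReadWrite` blocks of the sibling
// register modules. For example, `sys_coproc_read_raw!(u32, "CNTFRQ_EL0")`
// expands to a `get()` that emits `mrs $0, CNTFRQ_EL0` on aarch64 targets
// and falls back to `unimplemented!()` elsewhere, which keeps the crate
// compiling for host-side builds.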

@ -0,0 +1,105 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Memory Attribute Indirection Register - EL1
//!
//! Provides the memory attribute encodings corresponding to the possible
//! AttrIndx values in a Long-descriptor format translation table entry for
//! stage 1 translations at EL1.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
MAIR_EL1 [
// TODO: Macrofy this
/// Attribute 7
Attr7_HIGH OFFSET(60) NUMBITS(4) [],
Attr7_LOW_DEVICE OFFSET(56) NUMBITS(4) [],
Attr7_LOW_MEMORY OFFSET(56) NUMBITS(4) [],
/// Attribute 6
Attr6_HIGH OFFSET(52) NUMBITS(4) [],
Attr6_LOW_DEVICE OFFSET(48) NUMBITS(4) [],
Attr6_LOW_MEMORY OFFSET(48) NUMBITS(4) [],
/// Attribute 5
Attr5_HIGH OFFSET(44) NUMBITS(4) [],
Attr5_LOW_DEVICE OFFSET(40) NUMBITS(4) [],
Attr5_LOW_MEMORY OFFSET(40) NUMBITS(4) [],
/// Attribute 4
Attr4_HIGH OFFSET(36) NUMBITS(4) [],
Attr4_LOW_DEVICE OFFSET(32) NUMBITS(4) [],
Attr4_LOW_MEMORY OFFSET(32) NUMBITS(4) [],
/// Attribute 3
Attr3_HIGH OFFSET(28) NUMBITS(4) [],
Attr3_LOW_DEVICE OFFSET(24) NUMBITS(4) [],
Attr3_LOW_MEMORY OFFSET(24) NUMBITS(4) [],
/// Attribute 2
Attr2_HIGH OFFSET(20) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr2_LOW_DEVICE OFFSET(16) NUMBITS(4) [
Device_nGnRE = 0b0100
],
Attr2_LOW_MEMORY OFFSET(16) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
/// Attribute 1
Attr1_HIGH OFFSET(12) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr1_LOW_DEVICE OFFSET(8) NUMBITS(4) [
Device_nGnRE = 0b0100
],
Attr1_LOW_MEMORY OFFSET(8) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
/// Attribute 0
Attr0_HIGH OFFSET(4) NUMBITS(4) [
Device = 0b0000,
Memory_OuterNonCacheable = 0b0100,
Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
],
Attr0_LOW_DEVICE OFFSET(0) NUMBITS(4) [
Device_nGnRE = 0b0100
],
Attr0_LOW_MEMORY OFFSET(0) NUMBITS(4) [
InnerNonCacheable = 0b0100,
InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc = 0b1111
]
]
}
pub struct Reg;
impl RegisterReadWrite<u64, MAIR_EL1::Register> for Reg {
sys_coproc_read_raw!(u64, "MAIR_EL1");
sys_coproc_write_raw!(u64, "MAIR_EL1");
}
pub static MAIR_EL1: Reg = Reg {};

@ -0,0 +1,51 @@
//! Processor core registers
#[macro_use]
mod macros;
mod cntfrq_el0;
mod cnthctl_el2;
mod cntp_ctl_el0;
mod cntp_tval_el0;
mod cntpct_el0;
mod cntvoff_el2;
mod currentel;
mod daif;
mod elr_el2;
mod hcr_el2;
mod id_aa64mmfr0_el1;
mod mair_el1;
mod mpidr_el1;
mod sctlr_el1;
mod sp;
mod sp_el0;
mod sp_el1;
mod spsel;
mod spsr_el2;
mod tcr_el1;
mod ttbr0_el1;
// Export only the R/W traits and the static reg definitions
pub use register::cpu::*;
pub use self::cntfrq_el0::CNTFRQ_EL0;
pub use self::cnthctl_el2::CNTHCTL_EL2;
pub use self::cntp_ctl_el0::CNTP_CTL_EL0;
pub use self::cntp_tval_el0::CNTP_TVAL_EL0;
pub use self::cntpct_el0::CNTPCT_EL0;
pub use self::cntvoff_el2::CNTVOFF_EL2;
pub use self::currentel::CurrentEL;
pub use self::daif::DAIF;
pub use self::elr_el2::ELR_EL2;
pub use self::hcr_el2::HCR_EL2;
pub use self::id_aa64mmfr0_el1::ID_AA64MMFR0_EL1;
pub use self::mair_el1::MAIR_EL1;
pub use self::mpidr_el1::MPIDR_EL1;
pub use self::sctlr_el1::SCTLR_EL1;
pub use self::sp::SP;
pub use self::sp_el0::SP_EL0;
pub use self::sp_el1::SP_EL1;
pub use self::spsel::SPSel;
pub use self::spsr_el2::SPSR_EL2;
pub use self::tcr_el1::TCR_EL1;
pub use self::ttbr0_el1::TTBR0_EL1;

@ -0,0 +1,30 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Multiprocessor Affinity Register - EL1
//!
//! In a multiprocessor system, provides an additional PE
//! identification mechanism for scheduling purposes.
use register::cpu::RegisterReadOnly;
pub struct Reg;
impl RegisterReadOnly<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "MPIDR_EL1");
}
pub static MPIDR_EL1: Reg = Reg {};
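// Illustrative sketch (not part of this commit): on a quad-core part such as
// the Raspberry Pi's BCM283x, the low affinity bits identify the core; the
// two-bit mask is an assumption about that particular topology.
pub fn core_id() -> u64 {
    MPIDR_EL1.get() & 0b11
}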

@ -0,0 +1,103 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! System Control Register - EL1
//!
//! Provides top level control of the system, including its memory system, at
//! EL1 and EL0.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
SCTLR_EL1 [
/// Instruction access Cacheability control, for accesses at EL0 and
/// EL1:
///
/// 0 All instruction access to Normal memory from EL0 and EL1 are
/// Non-cacheable for all levels of instruction and unified cache.
///
/// If the value of SCTLR_EL1.M is 0, instruction accesses from stage
/// 1 of the EL1&0 translation regime are to Normal, Outer Shareable,
/// Inner Non-cacheable, Outer Non-cacheable memory.
///
/// 1 This control has no effect on the Cacheability of instruction
/// access to Normal memory from EL0 and EL1.
///
/// If the value of SCTLR_EL1.M is 0, instruction accesses from stage
/// 1 of the EL1&0 translation regime are to Normal, Outer Shareable,
/// Inner Write-Through, Outer Write-Through memory.
///
/// When the value of the HCR_EL2.DC bit is 1, then instruction access
/// to Normal memory from EL0 and EL1 are Cacheable regardless of the
/// value of the SCTLR_EL1.I bit.
///
/// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
/// is {1, 1}, this bit has no effect on the PE.
///
/// When this register has an architecturally-defined reset value, this
/// field resets to 0.
I OFFSET(12) NUMBITS(1) [
NonCacheable = 0,
Cacheable = 1
],
/// Cacheability control, for data accesses.
///
/// 0 All data access to Normal memory from EL0 and EL1, and all Normal
/// memory accesses to the EL1&0 stage 1 translation tables, are
/// Non-cacheable for all levels of data and unified cache.
///
/// 1 This control has no effect on the Cacheability of:
/// - Data access to Normal memory from EL0 and EL1.
/// - Normal memory accesses to the EL1&0 stage 1 translation
/// tables.
///
/// When the value of the HCR_EL2.DC bit is 1, the PE ignores
        /// SCTLR_EL1.C. This means that Non-secure EL0 and Non-secure EL1 data
/// accesses to Normal memory are Cacheable.
///
/// When ARMv8.1-VHE is implemented, and the value of HCR_EL2.{E2H, TGE}
/// is {1, 1}, this bit has no effect on the PE.
///
/// When this register has an architecturally-defined reset value, this
/// field resets to 0.
C OFFSET(2) NUMBITS(1) [
NonCacheable = 0,
Cacheable = 1
],
/// MMU enable for EL1 and EL0 stage 1 address translation. Possible
/// values of this bit are:
///
/// 0 EL1 and EL0 stage 1 address translation disabled.
/// See the SCTLR_EL1.I field for the behavior of instruction accesses
/// to Normal memory.
/// 1 EL1 and EL0 stage 1 address translation enabled.
M OFFSET(0) NUMBITS(1) [
Disable = 0,
Enable = 1
]
]
}
pub struct Reg;
impl RegisterReadWrite<u32, SCTLR_EL1::Register> for Reg {
sys_coproc_read_raw!(u32, "SCTLR_EL1");
sys_coproc_write_raw!(u32, "SCTLR_EL1");
}
pub static SCTLR_EL1: Reg = Reg {};

@ -0,0 +1,28 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u64, ()> for Reg {
read_raw!(u64, "sp");
write_raw!(u64, "sp");
}
pub static SP: Reg = Reg {};

@ -0,0 +1,31 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer - EL0
//!
//! Holds the stack pointer associated with EL0. At higher Exception levels,
//! this is used as the current stack pointer when the value of SPSel.SP is 0.
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "SP_EL0");
sys_coproc_write_raw!(u64, "SP_EL0");
}
pub static SP_EL0: Reg = Reg {};

@ -0,0 +1,36 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! The stack pointer - EL1
//!
//! Holds the stack pointer associated with EL1. When executing at EL1, the
//! value of SPSel.SP determines the current stack pointer:
//!
//! SPSel.SP | current stack pointer
//! --------------------------------
//! 0 | SP_EL0
//! 1 | SP_EL1
use register::cpu::RegisterReadWrite;
pub struct Reg;
impl RegisterReadWrite<u64, ()> for Reg {
sys_coproc_read_raw!(u64, "SP_EL1");
sys_coproc_write_raw!(u64, "SP_EL1");
}
pub static SP_EL1: Reg = Reg {};

@ -0,0 +1,48 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Stack Pointer Select
//!
//! Allows the Stack Pointer to be selected between SP_EL0 and SP_ELx.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
SPSel [
/// Stack pointer to use. Possible values of this bit are:
///
/// 0 Use SP_EL0 at all Exception levels.
/// 1 Use SP_ELx for Exception level ELx.
///
/// When this register has an architecturally-defined reset value, this
/// field resets to 1.
SP OFFSET(0) NUMBITS(1) [
EL0 = 0,
ELx = 1
]
]
}
pub struct Reg;
impl RegisterReadWrite<u32, SPSel::Register> for Reg {
sys_coproc_read_raw!(u32, "SPSEL");
sys_coproc_write_raw!(u32, "SPSEL");
}
#[allow(non_upper_case_globals)]
pub static SPSel: Reg = Reg {};
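// Illustrative sketch (not part of this commit): switch the current Exception
// level onto its dedicated stack pointer (SP_ELx) instead of SP_EL0.
pub fn use_dedicated_stack_pointer() {
    SPSel.write(SPSel::SP::ELx);
}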

@ -0,0 +1,106 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Saved Program Status Register - EL2
//!
//! Holds the saved process state when an exception is taken to EL2.
use register::cpu::RegisterReadWrite;
register_bitfields! {u32,
SPSR_EL2 [
/// Process state D mask. The possible values of this bit are:
///
/// 0 Watchpoint, Breakpoint, and Software Step exceptions targeted at
/// the current Exception level are not masked.
///
/// 1 Watchpoint, Breakpoint, and Software Step exceptions targeted at
/// the current Exception level are masked.
///
/// When the target Exception level of the debug exception is higher
/// than the current Exception level, the exception is not masked by
/// this bit.
D OFFSET(9) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// SError interrupt mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
A OFFSET(8) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// IRQ mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
I OFFSET(7) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// FIQ mask bit. The possible values of this bit are:
///
/// 0 Exception not masked.
/// 1 Exception masked.
F OFFSET(6) NUMBITS(1) [
Unmasked = 0,
Masked = 1
],
/// AArch64 state (Exception level and selected SP) that an exception
/// was taken from. The possible values are:
///
/// M[3:0] | State
/// --------------
/// 0b0000 | EL0t
/// 0b0100 | EL1t
/// 0b0101 | EL1h
/// 0b1000 | EL2t
/// 0b1001 | EL2h
///
/// Other values are reserved, and returning to an Exception level that
/// is using AArch64 with a reserved value in this field is treated as
/// an illegal exception return.
///
/// The bits in this field are interpreted as follows:
/// - M[3:2] holds the Exception Level.
        /// - M[1] is unused and is RES0 for all non-reserved values.
/// - M[0] is used to select the SP:
/// - 0 means the SP is always SP0.
/// - 1 means the exception SP is determined by the EL.
M OFFSET(0) NUMBITS(4) [
EL0t = 0b0000,
EL1t = 0b0100,
EL1h = 0b0101,
EL2t = 0b1000,
EL2h = 0b1001
]
]
}
pub struct Reg;
impl RegisterReadWrite<u32, SPSR_EL2::Register> for Reg {
sys_coproc_read_raw!(u32, "SPSR_EL2");
sys_coproc_write_raw!(u32, "SPSR_EL2");
}
pub static SPSR_EL2: Reg = Reg {};
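// Illustrative sketch (not part of this commit, hypothetical helper): the
// classic EL2 -> EL1 drop programs SPSR_EL2 with the target state (EL1 with
// SP_EL1, all interrupts masked) and ELR_EL2 with the entry point, then
// executes `eret`. Assumes the crate's `#![feature(asm)]` and the sibling
// elr_el2 module.
use super::elr_el2::ELR_EL2; // assumed sibling module path
#[cfg(target_arch = "aarch64")]
pub unsafe fn enter_el1(entry: u64) -> ! {
    SPSR_EL2.write(
        SPSR_EL2::D::Masked + SPSR_EL2::A::Masked + SPSR_EL2::I::Masked
            + SPSR_EL2::F::Masked + SPSR_EL2::M::EL1h,
    );
    ELR_EL2.set(entry);
    asm!("eret" :::: "volatile");
    loop {}
}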

@ -0,0 +1,178 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Control Register - EL1
//!
//! The control register for stage 1 of the EL1&0 translation regime.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
TCR_EL1 [
/// Top Byte ignored - indicates whether the top byte of an address is
/// used for address match for the TTBR0_EL1 region, or ignored and used
/// for tagged addresses. Defined values are:
///
/// 0 Top Byte used in the address calculation.
/// 1 Top Byte ignored in the address calculation.
TBI0 OFFSET(37) NUMBITS(1) [
Used = 0,
Ignored = 1
],
/// Intermediate Physical Address Size.
///
/// 000 32 bits, 4GiB.
/// 001 36 bits, 64GiB.
/// 010 40 bits, 1TiB.
/// 011 42 bits, 4TiB.
/// 100 44 bits, 16TiB.
/// 101 48 bits, 256TiB.
/// 110 52 bits, 4PiB
///
/// Other values are reserved.
///
/// The reserved values behave in the same way as the 101 or 110
/// encoding, but software must not rely on this property as the
/// behavior of the reserved values might change in a future revision of
/// the architecture.
///
/// The value 110 is permitted only if ARMv8.2-LPA is implemented and
/// the translation granule size is 64KiB.
///
/// In an implementation that supports 52-bit PAs, if the value of this
        /// field is not 110, then bits[51:48] of every translation table base
        /// address for the stage of translation controlled by TCR_EL1 are 0000.
IPS OFFSET(32) NUMBITS(3) [
Bits_32 = 0b000,
Bits_36 = 0b001,
Bits_40 = 0b010,
Bits_42 = 0b011,
Bits_44 = 0b100,
Bits_48 = 0b101,
Bits_52 = 0b110
],
/// Granule size for the TTBR0_EL1.
///
/// 00 4KiB
/// 01 64KiB
/// 10 16KiB
///
/// Other values are reserved.
///
/// If the value is programmed to either a reserved value, or a size
/// that has not been implemented, then the hardware will treat the
/// field as if it has been programmed to an IMPLEMENTATION DEFINED
/// choice of the sizes that has been implemented for all purposes other
/// than the value read back from this register.
///
/// It is IMPLEMENTATION DEFINED whether the value read back is the
/// value programmed or the value that corresponds to the size chosen.
TG0 OFFSET(14) NUMBITS(2) [
KiB_4 = 0b00,
KiB_16 = 0b10,
KiB_64 = 0b01
],
/// Shareability attribute for memory associated with translation table
/// walks using TTBR0_EL1.
///
/// 00 Non-shareable
/// 10 Outer Shareable
/// 11 Inner Shareable
///
/// Other values are reserved.
SH0 OFFSET(12) NUMBITS(2) [
None = 0b00,
Outer = 0b10,
Inner = 0b11
],
/// Outer cacheability attribute for memory associated with translation
/// table walks using TTBR0_EL1.
///
/// 00 Normal memory, Outer Non-cacheable
///
/// 01 Normal memory, Outer Write-Back Read-Allocate Write-Allocate
/// Cacheable
///
/// 10 Normal memory, Outer Write-Through Read-Allocate No
/// Write-Allocate Cacheable
///
/// 11 Normal memory, Outer Write-Back Read-Allocate No Write-Allocate
/// Cacheable
ORGN0 OFFSET(10) NUMBITS(2) [
NonCacheable = 0b00,
WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
],
/// Inner cacheability attribute for memory associated with translation
/// table walks using TTBR0_EL1.
///
/// 00 Normal memory, Inner Non-cacheable
///
/// 01 Normal memory, Inner Write-Back Read-Allocate Write-Allocate
/// Cacheable
///
/// 10 Normal memory, Inner Write-Through Read-Allocate No
/// Write-Allocate Cacheable
///
/// 11 Normal memory, Inner Write-Back Read-Allocate No Write-Allocate
/// Cacheable
IRGN0 OFFSET(8) NUMBITS(2) [
NonCacheable = 0b00,
WriteBack_ReadAlloc_WriteAlloc_Cacheable = 0b01,
WriteThrough_ReadAlloc_NoWriteAlloc_Cacheable = 0b10,
WriteBack_ReadAlloc_NoWriteAlloc_Cacheable = 0b11
],
/// Translation table walk disable for translations using
/// TTBR0_EL1. This bit controls whether a translation table walk is
/// performed on a TLB miss, for an address that is translated using
/// TTBR0_EL1. The encoding of this bit is:
///
/// 0 Perform translation table walks using TTBR0_EL1.
///
/// 1 A TLB miss on an address that is translated using TTBR0_EL1
/// generates a Translation fault. No translation table walk is
/// performed.
EPD0 OFFSET(7) NUMBITS(1) [
EnableTTBR0Walks = 0,
DisableTTBR0Walks = 1
],
/// The size offset of the memory region addressed by TTBR0_EL1. The
/// region size is 2^(64-T0SZ) bytes.
///
/// The maximum and minimum possible values for T0SZ depend on the level
/// of translation table and the memory translation granule size, as
/// described in the AArch64 Virtual Memory System Architecture chapter.
T0SZ OFFSET(0) NUMBITS(6) []
]
}
pub struct Reg;
impl RegisterReadWrite<u64, TCR_EL1::Register> for Reg {
sys_coproc_read_raw!(u64, "TCR_EL1");
sys_coproc_write_raw!(u64, "TCR_EL1");
}
pub static TCR_EL1: Reg = Reg {};

@ -0,0 +1,56 @@
/*
* Copyright (c) 2018 by the author(s)
*
* =============================================================================
*
* Licensed under either of
* - Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
* - MIT License (http://opensource.org/licenses/MIT)
* at your option.
*
* =============================================================================
*
* Author(s):
* - Andre Richter <andre.o.richter@gmail.com>
*/
//! Translation Table Base Register 0 - EL1
//!
//! Holds the base address of the translation table for the initial lookup for
//! stage 1 of the translation of an address from the lower VA range in the
//! EL1&0 translation regime, and other information for this translation regime.
use register::cpu::RegisterReadWrite;
register_bitfields! {u64,
TTBR0_EL1 [
/// An ASID for the translation table base address. The TCR_EL1.A1 field
/// selects either TTBR0_EL1.ASID or TTBR1_EL1.ASID.
///
/// If the implementation has only 8 bits of ASID, then the upper 8 bits
        /// of this field are RES0.
ASID OFFSET(48) NUMBITS(16) [],
/// Translation table base address
BADDR OFFSET(1) NUMBITS(47) [],
/// Common not Private
CnP OFFSET(0) NUMBITS(1) []
]
}
pub struct Reg;
impl RegisterReadWrite<u64, TTBR0_EL1::Register> for Reg {
sys_coproc_read_raw!(u64, "TTBR0_EL1");
sys_coproc_write_raw!(u64, "TTBR0_EL1");
}
impl Reg {
#[inline]
pub fn set_baddr(&self, addr: u64) {
self.write(TTBR0_EL1::BADDR.val(addr >> 1));
}
}
pub static TTBR0_EL1: Reg = Reg {};
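// Usage note (added for clarity): BADDR is declared at OFFSET(1), so
// `set_baddr` pre-shifts the (even, page-aligned) address right by one; the
// field insertion shifts it back into bits [47:1], leaving ASID and CnP
// cleared. A minimal sketch, assuming `root` holds a page-aligned level-0
// table address:
//
//     TTBR0_EL1.set_baddr(root as u64);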

kernel/Cargo.lock generated

@ -1,3 +1,16 @@
[[package]]
name = "aarch64"
version = "0.1.0"
dependencies = [
"bare-metal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "atags"
version = "0.1.0"
@ -243,6 +256,7 @@ dependencies = [
name = "ucore"
version = "0.1.0"
dependencies = [
"aarch64 0.1.0",
"atags 0.1.0",
"bbl 0.1.0",
"bcm2837 0.1.0",

@ -45,6 +45,7 @@ bbl = { path = "../crate/bbl" }
[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = "2.2.1"
aarch64 = { path = "../crate/aarch64" }
atags = { path = "../crate/atags" }
bcm2837 = { path = "../crate/bcm2837", features = ["use_generic_timer"] }

@ -1,22 +1,78 @@
//! Memory initialization for aarch64.
use bit_allocator::BitAlloc;
use ucore_memory::PAGE_SIZE;
use memory::{FRAME_ALLOCATOR, init_heap};
use super::atags::atags::Atags;
//use super::super::HEAP_ALLOCATOR;
use aarch64::{barrier, regs::*};
use core::ops::Range;

/// Memory initialization.
pub fn init() {
    /*let (start, end) = memory_map().expect("failed to find memory map");
    unsafe {
        HEAP_ALLOCATOR.lock().init(start, end - start);
    }*/
    info!("memory: init end");
    init_frame_allocator();
    init_heap();
    init_mmu();
}

extern "C" {
    static _end: u8;
}

fn init_frame_allocator() {
    let mut ba = FRAME_ALLOCATOR.lock();
    let (start, end) = memory_map().expect("failed to find memory map");
    ba.insert(to_range(start, end));
    info!("FrameAllocator init end");

    fn to_range(start: usize, end: usize) -> Range<usize> {
        let page_start = start / PAGE_SIZE;
        let page_end = (end - 1) / PAGE_SIZE + 1;
        page_start..page_end
    }
}

fn init_mmu() {
    // Define the memory attribute encodings (device MMIO vs. cacheable DRAM)
    // that the page tables reference by AttrIndx.
    MAIR_EL1.write(
        // Attribute 1 - device.
        MAIR_EL1::Attr1_HIGH::Device
            + MAIR_EL1::Attr1_LOW_DEVICE::Device_nGnRE
            // Attribute 0 - cacheable normal DRAM.
            + MAIR_EL1::Attr0_HIGH::Memory_OuterWriteBack_NonTransient_ReadAlloc_WriteAlloc
            + MAIR_EL1::Attr0_LOW_MEMORY::InnerWriteBack_NonTransient_ReadAlloc_WriteAlloc,
    );

    // Configure various settings of stage 1 of the EL1 translation regime.
    let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
    TCR_EL1.write(
        TCR_EL1::TBI0::Ignored
            + TCR_EL1::IPS.val(ips)
            + TCR_EL1::TG0::KiB_4 // 4 KiB granule
            + TCR_EL1::SH0::Inner
            + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
            + TCR_EL1::EPD0::EnableTTBR0Walks
            + TCR_EL1::T0SZ.val(34), // Start walks at level 2
    );

    // Switch the MMU on.
    //
    // First, force all previous changes to be seen before the MMU is enabled.
    unsafe { barrier::isb(barrier::SY); }

    // Enable the MMU and turn on data and instruction caching.
    SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

    // Force MMU init to complete before next instruction.
    unsafe { barrier::isb(barrier::SY); }
}

/// Returns the (start address, end address) of the available memory on this
/// system if it can be determined. If it cannot, `None` is returned.
///
@ -33,3 +89,4 @@ pub fn memory_map() -> Option<(usize, usize)> {
    None
}

@ -1,218 +1,233 @@
//! Page table implementations for aarch64.
use bit_allocator::{BitAlloc};
// Depends on kernel
use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
use ucore_memory::memory_set::*;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, ttbr0_el1_read, ttbr0_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use aarch64::{regs::*};

pub trait PageExt {
    fn of_addr(address: usize) -> Self;
    fn range_of(begin: usize, end: usize) -> PageRange;
}

impl PageExt for Page {
    fn of_addr(address: usize) -> Self {
        Page::containing_address(VirtAddr::new(address as u64))
    }
    fn range_of(begin: usize, end: usize) -> PageRange {
        Page::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
    }
}

pub trait FrameExt {
    fn of_addr(address: usize) -> Self;
}

impl FrameExt for Frame {
    fn of_addr(address: usize) -> Self {
        Frame::containing_address(PhysAddr::new(address as u64))
    }
}

pub struct ActivePageTable(RecursivePageTable<'static>);

pub struct PageEntry(PageTableEntry);

impl PageTable for ActivePageTable {
    type Entry = PageEntry;

    fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
        let flags = EF::PRESENT | EF::WRITE | EF::UXN;
        self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForAarch64)
            .unwrap().flush();
        self.get_entry(addr)
    }

    fn unmap(&mut self, addr: usize) {
        let (frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
        flush.flush();
    }

    fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
        let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
        unsafe { &mut *(entry_addr as *mut PageEntry) }
    }

    fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
        use core::slice;
        unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
    }

    fn read(&mut self, addr: usize) -> u8 {
        unsafe { *(addr as *const u8) }
    }

    fn write(&mut self, addr: usize, data: u8) {
        unsafe { *(addr as *mut u8) = data; }
    }
}

impl ActivePageTable {
    pub unsafe fn new() -> Self {
        ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
    }

    fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
        // Create a temporary page
        let page = Page::of_addr(0xcafebabe);
        assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
        // Map it to table
        self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
        // Call f
        let table = unsafe { &mut *page.start_address().as_mut_ptr() };
        f(self, table);
        // Unmap the page
        self.unmap(0xcafebabe);
    }
}

impl Entry for PageEntry {
    fn update(&mut self) {
        tlb_invalidate();
    }
    fn present(&self) -> bool { self.0.flags().contains(EF::PRESENT) }
    fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
    fn writable(&self) -> bool { self.0.flags().contains(EF::WRITE) }
    fn dirty(&self) -> bool { self.hw_dirty() && self.sw_dirty() }

    fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
    fn clear_dirty(&mut self) {
        self.as_flags().remove(EF::DIRTY);
        self.as_flags().insert(EF::RDONLY);
    }
    fn set_writable(&mut self, value: bool) {
        self.as_flags().set(EF::RDONLY, !value);
        self.as_flags().set(EF::WRITE, value);
    }
    fn set_present(&mut self, value: bool) { self.as_flags().set(EF::PRESENT, value); }
    fn target(&self) -> usize { self.0.addr().as_u64() as usize }
    fn set_target(&mut self, target: usize) {
        let flags = self.0.flags();
        self.0.set_addr(PhysAddr::new(target as u64), flags);
    }
    fn writable_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
    fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) }
    fn set_shared(&mut self, writable: bool) {
        let flags = self.as_flags();
        flags.set(EF::BIT_8, writable);
        flags.set(EF::BIT_9, writable);
    }
    fn clear_shared(&mut self) { self.as_flags().remove(EF::BIT_8 | EF::BIT_9); }
    fn user(&self) -> bool { self.0.flags().contains(EF::USER_ACCESSIBLE) }
    fn swapped(&self) -> bool { self.0.flags().contains(EF::SWAPPED) }
    fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::SWAPPED, value); }
    fn set_user(&mut self, value: bool) {
        self.as_flags().set(EF::USER_ACCESSIBLE, value);
        if value {
            let mut addr = self as *const _ as usize;
            for _ in 0..3 {
                // Upper level entry
                addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
                // set USER_ACCESSIBLE
                unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) };
            }
        }
    }
    fn execute(&self) -> bool { !self.0.flags().contains(EF::UXN) }
    fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::UXN, !value); }
}

impl PageEntry {
    fn read_only(&self) -> bool { self.0.flags().contains(EF::RDONLY) }
    fn hw_dirty(&self) -> bool { self.writable() && !self.read_only() }
    fn sw_dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
    fn as_flags(&mut self) -> &mut EF {
        unsafe { &mut *(self as *mut _ as *mut EF) }
    }
}

#[derive(Debug)]
pub struct InactivePageTable0 {
    p4_frame: Frame,
}

impl InactivePageTable for InactivePageTable0 {
    type Active = ActivePageTable;

    fn new() -> Self {
        let mut pt = Self::new_bare();
        pt.map_kernel();
        pt
    }

    fn new_bare() -> Self {
        let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
            .expect("failed to allocate frame");
        active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
            table.zero();
            // set up recursive mapping for the table
            table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITE);
        });
        InactivePageTable0 { p4_frame: frame }
    }

    fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
        active_table().with_temporary_map(&ttbr0_el1_read().0, |active_table, p4_table: &mut Aarch64PageTable| {
            let backup = p4_table[0o777].clone();
            // overwrite recursive mapping
            p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITE);
            tlb_invalidate();
            // execute f in the new context
            f(active_table);
            // restore recursive mapping to original p4 table
            p4_table[0o777] = backup;
            tlb_invalidate();
        });
    }

    unsafe fn activate(&self) {
        let old_frame = ttbr0_el1_read().0;
        let new_frame = self.p4_frame.clone();
        debug!("switch table {:?} -> {:?}", old_frame, new_frame);
        if old_frame != new_frame {
            ttbr0_el1_write(new_frame);
        }
    }

    unsafe fn with(&self, f: impl FnOnce()) {
        let old_frame = ttbr0_el1_read().0;
        let new_frame = self.p4_frame.clone();
        debug!("switch table {:?} -> {:?}", old_frame, new_frame);
        if old_frame != new_frame {
            ttbr0_el1_write(new_frame);
        }
        f();
        debug!("switch table {:?} -> {:?}", new_frame, old_frame);
        if old_frame != new_frame {
            ttbr0_el1_write(old_frame);
        }
    }

    fn token(&self) -> usize {
        self.p4_frame.start_address().as_u64() as usize // as CR3
    }

    fn alloc_frame() -> Option<usize> {
        alloc_frame()
    }

    fn dealloc_frame(target: usize) {
        dealloc_frame(target)
    }
@ -220,3 +235,37 @@ impl InactivePageTable for InactivePageTable0 {
        alloc_stack()
    }
}

impl InactivePageTable0 {
    fn map_kernel(&mut self) {
        let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut Aarch64PageTable) };
        // Kernel at 0xffff_ff00_0000_0000
        // Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
        let e0 = table[0].clone();
        self.edit(|_| {
            table[0].set_addr(e0.addr(), e0.flags() & EF::GLOBAL);
            //table[175].set_addr(estack.addr(), estack.flags() & EF::GLOBAL);
        });
    }
}

impl Drop for InactivePageTable0 {
    fn drop(&mut self) {
        info!("PageTable dropping: {:?}", self);
        Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
    }
}

struct FrameAllocatorForAarch64;

impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
    fn alloc(&mut self) -> Option<Frame> {
        alloc_frame().map(|addr| Frame::of_addr(addr))
    }
}

impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 {
    fn dealloc(&mut self, frame: Frame) {
        dealloc_frame(frame.start_address().as_u64() as usize);
    }
}

@ -33,6 +33,8 @@ extern crate volatile;
#[cfg(target_arch = "x86_64")]
extern crate x86_64;
extern crate xmas_elf;
#[cfg(target_arch = "aarch64")]
extern crate aarch64;
use linked_list_allocator::LockedHeap;
