Merge branch 'mmu' into arch-aarch64

toolchain_update
equation314 6 years ago
commit 163e092c29

@ -98,6 +98,7 @@ pub struct MemoryAttr {
user: bool,
readonly: bool,
execute: bool,
mmio: bool,
hide: bool,
}
@ -114,6 +115,10 @@ impl MemoryAttr {
self.execute = true;
self
}
pub fn mmio(mut self) -> Self {
self.mmio = true;
self
}
pub fn hide(mut self) -> Self {
self.hide = true;
self
@ -122,8 +127,9 @@ impl MemoryAttr {
if self.user { entry.set_user(true); }
if self.readonly { entry.set_writable(false); }
if self.execute { entry.set_execute(true); }
if self.mmio { entry.set_mmio(true); }
if self.hide { entry.set_present(false); }
if self.user || self.readonly || self.execute || self.hide { entry.update(); }
if self.user || self.readonly || self.execute || self.mmio || self.hide { entry.update(); }
}
}
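The new `mmio` flag follows the same builder pattern as `user`, `readonly` and `execute`. A minimal usage sketch, taken from the `remap_the_kernel` hunk later in this commit (so the names are not new assumptions):

    // Mark the peripheral MMIO window as device memory when remapping the kernel.
    ms.push(MemoryArea::new_identity(IO_REMAP_BASE, IO_REMAP_END,
                                     MemoryAttr::default().mmio(), "io_remap"));

When the area is mapped, `MemoryAttr::apply` calls `entry.set_mmio(true)`, which the aarch64 page-table code below translates into the device memory attribute.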
@ -166,6 +172,9 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
@ -223,4 +232,4 @@ impl<T: InactivePageTable> Debug for MemorySet<T> {
pub struct Stack {
pub top: usize,
pub bottom: usize,
}
}

@ -57,4 +57,6 @@ pub trait Entry {
fn set_user(&mut self, value: bool);
fn execute(&self) -> bool;
fn set_execute(&mut self, value: bool);
fn mmio(&self) -> bool;
fn set_mmio(&mut self, value: bool);
}

kernel/Cargo.lock (generated, 17 changed lines)

@ -1,3 +1,17 @@
[[package]]
name = "aarch64"
version = "2.2.2"
source = "git+https://github.com/equation314/aarch64#47bf5439f5a1379f0fef6272853cf684207a4e45"
dependencies = [
"bare-metal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "atags"
version = "0.1.0"
@ -243,6 +257,7 @@ dependencies = [
name = "ucore"
version = "0.1.0"
dependencies = [
"aarch64 2.2.2 (git+https://github.com/equation314/aarch64)",
"atags 0.1.0",
"bbl 0.1.0",
"bcm2837 0.1.0",
@ -251,7 +266,6 @@ dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bootloader 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
"cortex-a 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"linked_list_allocator 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
@ -349,6 +363,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aarch64 2.2.2 (git+https://github.com/equation314/aarch64)" = "<none>"
"checksum bare-metal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1bdcf9294ed648c7cd29b11db06ea244005aeef50ae8f605b1a3af2940bf8f92"
"checksum bit-vec 0.5.0 (git+https://github.com/AltSysrq/bit-vec.git)" = "<none>"
"checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"

@ -44,7 +44,7 @@ riscv = { path = "../crate/riscv" }
bbl = { path = "../crate/bbl" }
[target.'cfg(target_arch = "aarch64")'.dependencies]
cortex-a = "2.2.1"
aarch64 = { git = "https://github.com/equation314/aarch64" }
atags = { path = "../crate/atags" }
bcm2837 = { path = "../crate/bcm2837", features = ["use_generic_timer"] }

@ -6,13 +6,13 @@ pub mod irq;
pub mod timer;
pub mod serial;
pub const IO_REMAP_BASE: usize = bcm2837::IO_BASE;
pub const IO_REMAP_END: usize = 0x40001000;
pub fn init() {
// FIXME
// assert_has_not_been_called!("board::init must be called only once");
assert_has_not_been_called!("board::init must be called only once");
unsafe {
serial::SERIAL_PORT.init();
}
serial::SERIAL_PORT.lock().init();
println!("Hello Raspberry Pi!");
}

@ -20,8 +20,7 @@ impl SerialPort {
/// Initialize a newly created SerialPort; can only be called once.
pub fn init(&mut self) {
// FIXME
// assert_has_not_been_called!("SerialPort::init must be called only once");
assert_has_not_been_called!("SerialPort::init must be called only once");
self.mu = Some(MiniUart::new());
}
@ -71,7 +70,4 @@ impl fmt::Write for SerialPort {
}
}
// FIXME
// pub static SERIAL_PORT: Mutex<SerialPort> = Mutex::new(SerialPort::new());
pub static mut SERIAL_PORT: SerialPort = SerialPort::new();
pub static SERIAL_PORT: Mutex<SerialPort> = Mutex::new(SerialPort::new());

@ -93,7 +93,7 @@ set_stack:
zero_bss:
# load the start address and number of bytes in BSS section
ldr x1, =__bss_start
ldr x1, =sbss
ldr x2, =__bss_length
zero_bss_loop:

@ -8,34 +8,44 @@ SECTIONS {
}
. = 0x100000; /* Load the kernel at this address. It's also the kernel stack top address */
bootstacktop = .;
.text : {
stext = .;
*(.text.entry)
*(.text .text.* .gnu.linkonce.t*)
. = ALIGN(4K);
etext = .;
}
.rodata : {
srodata = .;
*(.rodata .rodata.* .gnu.linkonce.r*)
. = ALIGN(4K);
erodata = .;
}
.data : {
sdata = .;
*(.data .data.* .gnu.linkonce.d*)
. = ALIGN(4K);
edata = .;
}
.bss (NOLOAD) : {
. = ALIGN(32);
__bss_start = .;
sbss = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(8);
__bss_end = .;
. = ALIGN(4K);
ebss = .;
}
/* end of the binary */
_end = ALIGN(8);
/* number of bytes in BSS section and complete binary */
__bss_length = (__bss_end - __bss_start);
__bss_length = (ebss - sbss);
__binary_length = (_end - _start);
/DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) }

@ -1,12 +1,20 @@
//! TrapFrame and context definitions for aarch64.
extern crate aarch64;
use spin::{Mutex};
use aarch64::barrier;
use aarch64::addr::PhysAddr;
use aarch64::paging::PhysFrame;
use aarch64::asm::{tlb_invalidate_all, ttbr_el1_write_asid};
#[repr(C)]
#[derive(Default, Debug, Copy, Clone)]
pub struct TrapFrame {
pub elr: usize,
pub spsr: usize,
pub sp: usize,
pub tpidr: usize,
pub tpidr: usize, // currently unused
// pub q0to31: [u128; 32], // disable SIMD/FP registers
pub x1to29: [usize; 29],
pub __reserved: usize,
@ -47,10 +55,14 @@ pub struct InitStack {
}
impl InitStack {
unsafe fn push_at(self, stack_top: usize) -> Context {
unsafe fn push_at(self, stack_top: usize, ttbr: usize) -> Context {
let ptr = (stack_top as *mut Self).offset(-1);
*ptr = self;
Context(ptr as usize)
Context {
stack_top: ptr as usize,
ttbr: PhysFrame::containing_address(PhysAddr::new(ttbr as u64)),
asid: Asid::default(),
}
}
}
@ -63,18 +75,20 @@ extern {
struct ContextData {
x19to29: [usize; 11],
lr: usize,
ttbr0: usize,
}
impl ContextData {
fn new(ttbr0: usize) -> Self {
ContextData { lr: __trapret as usize, ttbr0, ..ContextData::default() }
fn new() -> Self {
ContextData { lr: __trapret as usize, ..ContextData::default() }
}
}
#[derive(Debug)]
pub struct Context(usize);
pub struct Context {
stack_top: usize,
ttbr: PhysFrame,
asid: Asid,
}
impl Context {
/// Switch to another kernel thread.
@ -86,10 +100,10 @@ impl Context {
/// Pop all callee-saved registers, then return to the target.
#[naked]
#[inline(never)]
pub unsafe extern fn switch(&mut self, target: &mut Self) {
unsafe extern fn __switch(self_stack: &mut usize, target_stack: &mut usize) {
asm!(
"
mov x10, #-(13 * 8)
mov x10, #-(12 * 8)
add x8, sp, x10
str x8, [x0]
stp x19, x20, [x8], #16 // store callee-saved registers
@ -98,8 +112,6 @@ impl Context {
stp x25, x26, [x8], #16
stp x27, x28, [x8], #16
stp x29, lr, [x8], #16
mrs x9, ttbr0_el1
str x9, [x8], #8
ldr x8, [x1]
ldp x19, x20, [x8], #16 // restore callee-saved registers
@ -108,44 +120,88 @@ impl Context {
ldp x25, x26, [x8], #16
ldp x27, x28, [x8], #16
ldp x29, lr, [x8], #16
ldr x9, [x8], #8
mov sp, x8
msr ttbr0_el1, x9 // set new page directory
dsb ishst // ensure write has completed
tlbi vmalle1is // invalidate all stage 1 EL1 TLB entries
dsb ish // ensure TLB invalidation is complete
isb // synchronize context on this processor
str xzr, [x1]
ret"
: : : : "volatile" );
}
pub unsafe fn switch(&mut self, target: &mut Self) {
target.asid = ASID_ALLOCATOR.lock().alloc(target.asid);
// with ASIDs we need not flush the TLB on every context switch
ttbr_el1_write_asid(1, target.asid.value, target.ttbr);
barrier::dsb(barrier::ISH);
Self::__switch(&mut self.stack_top, &mut target.stack_top);
}
pub unsafe fn null() -> Self {
Context(0)
Context {
stack_top: 0,
ttbr: PhysFrame::containing_address(PhysAddr::new(0)),
asid: Asid::default(),
}
}
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, ttbr0: usize) -> Self {
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top)
}.push_at(kstack_top, ttbr)
}
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, ttbr0: usize) -> Self {
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
}.push_at(kstack_top)
}.push_at(kstack_top, ttbr)
}
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, ttbr0: usize) -> Self {
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(ttbr0),
context: ContextData::new(),
tf: {
let mut tf = tf.clone();
tf.x0 = 0;
tf
},
}.push_at(kstack_top)
}.push_at(kstack_top, ttbr)
}
}
const ASID_MASK: u16 = 0xffff;
#[derive(Debug, Copy, Clone, Default)]
#[repr(C)]
struct Asid {
value: u16,
generation: u16,
}
struct AsidAllocator(Asid);
impl AsidAllocator {
fn new() -> Self {
AsidAllocator(Asid { value: 0, generation: 1 })
}
fn alloc(&mut self, old_asid: Asid) -> Asid {
if self.0.generation == old_asid.generation {
return old_asid;
}
if self.0.value == ASID_MASK {
self.0.value = 0;
self.0.generation = self.0.generation.wrapping_add(1);
if self.0.generation == 0 {
self.0.generation += 1;
}
tlb_invalidate_all();
}
self.0.value += 1;
return self.0;
}
}
lazy_static! {
static ref ASID_ALLOCATOR: Mutex<AsidAllocator> = Mutex::new(AsidAllocator::new());
}
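A brief sketch of the allocation policy above, assuming a freshly created allocator (the types are private to this module, so this is illustrative only):

    let mut a = AsidAllocator::new();      // current = Asid { value: 0, generation: 1 }
    let first = a.alloc(Asid::default());  // old generation 0 != 1, so a new ASID is handed out: { 1, 1 }
    let again = a.alloc(first);            // generations match, the thread keeps ASID 1
    // only when value reaches ASID_MASK does the generation bump and the TLB get flushed

Because each thread keeps its ASID as long as the generation matches, `switch` only has to program TTBR1 with the target's ASID and page-table frame; no TLB flush is needed on the common path.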

@ -2,7 +2,9 @@
use arch::board::irq::handle_irq;
use super::context::TrapFrame;
use super::syndrome::Syndrome;
use super::syndrome::{Fault, Syndrome};
use aarch64::regs::*;
global_asm!(include_str!("trap.S"));
global_asm!(include_str!("vector.S"));
@ -38,14 +40,22 @@ pub struct Info {
/// the trap frame for the exception.
#[no_mangle]
pub extern "C" fn rust_trap(info: Info, esr: u32, tf: &mut TrapFrame) {
let syndrome = Syndrome::from(esr);
trace!("Interrupt: {:?} from: {:?}", syndrome, info);
trace!("Interrupt: {:?}, ELR: {:#x?}", info, tf.elr);
match info.kind {
Kind::Synchronous => {
let syndrome = Syndrome::from(esr);
trace!("ESR: {:#x?}, Syndrome: {:?}", esr, syndrome);
// the syndrome is only valid for synchronous exceptions
match syndrome {
Syndrome::Brk(brk) => handle_break(brk, tf),
Syndrome::Svc(_) => handle_syscall(tf),
Syndrome::Svc(svc) => handle_syscall(svc, tf),
Syndrome::DataAbort { kind, level: _ }
| Syndrome::InstructionAbort { kind, level: _ } => match kind {
Fault::Translation | Fault::AccessFlag | Fault::Permission => {
handle_page_fault(tf)
}
_ => ::trap::error(tf),
},
_ => ::trap::error(tf),
}
}
@ -56,12 +66,16 @@ pub extern "C" fn rust_trap(info: Info, esr: u32, tf: &mut TrapFrame) {
trace!("Interrupt end");
}
fn handle_break(num: u16, tf: &mut TrapFrame) {
fn handle_break(_num: u16, tf: &mut TrapFrame) {
// Skip the current brk instruction (ref: J1.1.2, page 6147)
tf.elr += 4;
}
fn handle_syscall(tf: &mut TrapFrame) {
fn handle_syscall(num: u16, tf: &mut TrapFrame) {
if num != 0 {
::trap::error(tf);
}
// the svc instruction has already been skipped on exception entry, so ELR needs no adjustment (ref: J1.1.2, page 6152)
let ret = ::syscall::syscall(
tf.x1to29[7] as usize,
@ -77,3 +91,10 @@ fn handle_syscall(tf: &mut TrapFrame) {
);
tf.x0 = ret as usize;
}
fn handle_page_fault(tf: &mut TrapFrame) {
let addr = FAR_EL1.get();
trace!("\nEXCEPTION: Page Fault @ {:#x}", addr);
::trap::error(tf);
}

@ -4,7 +4,8 @@ mod handler;
mod context;
mod syndrome;
use super::cortex_a::regs::*;
use aarch64::regs::*;
pub use self::context::*;
pub use self::handler::*;

@ -4,15 +4,11 @@ use core::fmt::{Arguments, Write};
use super::board::serial::{SerialRead, SERIAL_PORT};
pub fn getchar() -> char {
// FIXME
unsafe {
SERIAL_PORT.receive() as char
}
unsafe { SERIAL_PORT.force_unlock(); }
SERIAL_PORT.lock().receive() as char
}
pub fn putfmt(fmt: Arguments) {
// FIXME
unsafe {
SERIAL_PORT.write_fmt(fmt).unwrap()
}
unsafe { SERIAL_PORT.force_unlock(); }
SERIAL_PORT.lock().write_fmt(fmt).unwrap()
}

@ -1,28 +1,131 @@
//! Memory initialization for aarch64.
use ucore_memory::PAGE_SIZE;
use memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Stack};
use super::atags::atags::Atags;
use super::super::HEAP_ALLOCATOR;
use aarch64::{barrier, regs::*, addr::*};
use aarch64::paging::{PhysFrame as Frame, memory_attribute::*};
/// Memory initialization.
pub fn init() {
init_frame_allocator();
init_heap();
remap_the_kernel();
info!("memory: init end");
}
/// Initialize temporary paging and enable the MMU immediately after boot. The serial port is still disabled at this point.
pub fn init_mmu_early() {
#[repr(align(4096))]
struct PageData([u8; PAGE_SIZE]);
static PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]);
static PAGE_TABLE_LVL3: PageData = PageData([0; PAGE_SIZE]);
static PAGE_TABLE_LVL2: PageData = PageData([0; PAGE_SIZE]);
let frame_lvl4 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL4 as *const _ as u64));
let frame_lvl3 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL3 as *const _ as u64));
let frame_lvl2 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL2 as *const _ as u64));
super::paging::setup_temp_page_table(frame_lvl4, frame_lvl3, frame_lvl2);
// Define the MAIR_EL1 memory attributes: device, normal, and normal non-cacheable.
MAIR_EL1.write(
MAIR_EL1::Attr0.val(MairDevice::config_value()) +
MAIR_EL1::Attr1.val(MairNormal::config_value()) +
MAIR_EL1::Attr2.val(MairNormalNonCacheable::config_value()),
);
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI1::Ignored +
TCR_EL1::TBI0::Ignored +
TCR_EL1::AS::Bits_16 +
TCR_EL1::IPS.val(ips) +
TCR_EL1::TG1::KiB_4 +
TCR_EL1::SH1::Inner +
TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD1::EnableTTBR1Walks +
TCR_EL1::A1::UseTTBR1ASID +
TCR_EL1::T1SZ.val(16) +
TCR_EL1::TG0::KiB_4 +
TCR_EL1::SH0::Inner +
TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD0::EnableTTBR0Walks +
TCR_EL1::T0SZ.val(16),
);
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
unsafe { barrier::isb(barrier::SY); }
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction
unsafe { barrier::isb(barrier::SY); }
}
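For reference, the TCR_EL1 values above imply the address-space layout used throughout this commit (a sketch, stating only what follows from the fields written here):

    // T0SZ = T1SZ = 16  ->  each translation regime spans 64 - 16 = 48 VA bits
    const VA_BITS: usize = 64 - 16;        // 48
    const VA_SIZE: usize = 1 << VA_BITS;   // 256 TiB per TTBR, 4 KiB granule
    // AS::Bits_16 + A1::UseTTBR1ASID: 16-bit ASIDs, taken from TTBR1 (see the ASID allocator)

TTBR0 therefore covers the low half (the identity-mapped kernel) and TTBR1 the high half used for user address spaces, which is why `USER_STACK_OFFSET` is set to 0xffff_8000_0000_0000 in the consts hunk below.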
fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
use consts::MEMORY_OFFSET;
let (start, end) = memory_map().expect("failed to find memory map");
unsafe {
HEAP_ALLOCATOR.lock().init(start, end - start);
let mut ba = FRAME_ALLOCATOR.lock();
ba.insert(to_range(start, end));
info!("FrameAllocator init end");
/*
 * @param:
 *   start: start address
 *   end: end address
 * @brief:
 *   convert a memory address range into a range of page numbers
 * @retval:
 *   the page-number range covering [start, end)
 */
fn to_range(start: usize, end: usize) -> Range<usize> {
let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
page_start..page_end
}
info!("memory: init end");
}
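A worked example of `to_range`, with illustrative values (MEMORY_OFFSET is 0 for aarch64 per the consts hunk below; PAGE_SIZE is assumed to be 4096):

    // start = 0x10_0000 (kernel load address), end = 0x4000_0000 (1 GiB)
    // page_start = (0x10_0000 - 0) / 4096            = 0x100
    // page_end   = (0x4000_0000 - 0 - 1) / 4096 + 1  = 0x40000
    // => the frame allocator is seeded with page numbers 0x100..0x40000

The `- 1 ... + 1` rounds the end up to the next page boundary, so a partially covered last page is still counted.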
extern "C" {
static _end: u8;
/// Remap the kernel page table after all initialization.
fn remap_the_kernel() {
let (bottom, top) = (0, bootstacktop as usize);
let kstack = Stack {
top,
bottom,
};
static mut SPACE: [u8; 0x1000] = [0; 0x1000];
let mut ms = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
ms.push(MemoryArea::new_identity(bottom, top, MemoryAttr::default(), "kstack"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
use arch::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(MemoryArea::new_identity(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(), "io_remap"));
unsafe { ms.activate(); }
use core::mem::forget;
forget(ms);
info!("kernel remap end");
}
/// Returns the (start address, end address) of the available memory on this
/// system if it can be determined. If it cannot, `None` is returned.
///
/// This function is expected to return `Some` under all normal circumstances.
pub fn memory_map() -> Option<(usize, usize)> {
let binary_end = unsafe { (&_end as *const u8) as u32 };
fn memory_map() -> Option<(usize, usize)> {
let binary_end = _end as u32;
let mut atags: Atags = Atags::get();
while let Some(atag) = atags.next() {
@ -33,3 +136,17 @@ pub fn memory_map() -> Option<(usize, usize)> {
None
}
extern {
fn bootstacktop();
fn stext();
fn etext();
fn sdata();
fn edata();
fn srodata();
fn erodata();
fn sbss();
fn ebss();
fn _start();
fn _end();
}

@ -1,7 +1,6 @@
//! Entrance and initialization for aarch64.
extern crate atags;
extern crate cortex_a;
pub mod io;
pub mod paging;
@ -17,9 +16,13 @@ pub use self::board::timer;
/// The entry point of kernel
#[no_mangle] // don't mangle the name of this function
pub extern "C" fn rust_main() -> ! {
// Enable mmu and paging
memory::init_mmu_early();
// Init board to enable serial port.
board::init();
::logging::init(); // FIXME
::logging::init();
interrupt::init();
memory::init();
timer::init();

@ -1,218 +1,264 @@
//! Page table implementations for aarch64.
// Depends on kernel
use consts::{KERNEL_PML4, RECURSIVE_INDEX};
use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
use ucore_memory::memory_set::*;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, flush_icache_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB, Size2MiB, Size1GiB};
use aarch64::paging::memory_attribute::*;
// needs 3 frames: one page each for the level 4, 3 and 2 tables
pub fn setup_temp_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame) {
let p4 = unsafe { &mut *(frame_lvl4.start_address().as_u64() as *mut Aarch64PageTable) };
let p3 = unsafe { &mut *(frame_lvl3.start_address().as_u64() as *mut Aarch64PageTable) };
let p2 = unsafe { &mut *(frame_lvl2.start_address().as_u64() as *mut Aarch64PageTable) };
p4.zero();
p3.zero();
p2.zero();
let (start_addr, end_addr) = (0, 0x40000000);
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::XN;
for page in Page::<Size2MiB>::range_of(start_addr, end_addr) {
let paddr = PhysAddr::new(page.start_address().as_u64());
use arch::board::IO_REMAP_BASE;
if paddr.as_u64() >= IO_REMAP_BASE as u64 {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
} else {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
}
}
type VirtAddr = usize;
type PhysAddr = usize;
p3[0].set_frame(frame_lvl2, EF::default(), MairNormal::attr_value());
p3[1].set_block::<Size1GiB>(PhysAddr::new(0x40000000), block_flags | EF::PXN, MairDevice::attr_value());
use alloc::alloc::{alloc, Layout};
use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
/// TODO
pub struct ActivePageTable {
// TODO
ttbr_el1_write(0, frame_lvl4);
tlb_invalidate_all();
}
impl ActivePageTable {
/// TODO
pub unsafe fn new() -> Self {
unimplemented!()
}
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
unimplemented!()
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
.unwrap().flush();
self.get_entry(addr)
}
fn unmap(&mut self, addr: VirtAddr) {
unimplemented!()
fn unmap(&mut self, addr: usize) {
let (frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
flush.flush();
}
fn get_entry(&mut self, addr: VirtAddr) -> &mut Self::Entry {
unimplemented!()
fn get_entry(&mut self, addr: usize) -> &mut PageEntry {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39);
unsafe { &mut *(entry_addr as *mut PageEntry) }
}
// For testing with mock
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {
unimplemented!()
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: VirtAddr) -> u8 {
unimplemented!()
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: VirtAddr, data: u8) {
unimplemented!()
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
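The `get_entry` computation relies on the recursive slot installed at RECURSIVE_INDEX (0o777, per the consts hunk below): shifting the virtual address right by 9 bits and prepending the recursive index makes the hardware walk resolve to the last-level (4 KiB) page-table entry for `addr`. A worked example, assuming the formula above:

    // addr = 0x1234_5000
    // addr >> 9                = 0x9_1a28
    // & 0o777_777_777_7770     = 0x9_1a28   (already 8-byte aligned)
    // | (0o777 << 39)          = 0x7f80_0009_1a28
    // -> dereferencing that address yields the PTE for the page at 0x1234_5000

The same trick in reverse appears in `PageEntry::update` below, which shifts the entry's own address left by 9 to recover the mapped virtual address before invalidating its TLB entry.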
/// TODO
pub struct PageEntry {
// TODO
const ROOT_PAGE_TABLE: *mut Aarch64PageTable =
((RECURSIVE_INDEX << 39) | (RECURSIVE_INDEX << 30) | (RECURSIVE_INDEX << 21) | (RECURSIVE_INDEX << 12)) as *mut Aarch64PageTable;
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(ROOT_PAGE_TABLE as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
impl Entry for PageEntry {
/// IMPORTANT!
/// This must be called after any change to ensure it becomes effective.
/// Usually this flushes the corresponding TLB entry.
fn update(&mut self) {
unimplemented!()
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
tlb_invalidate(addr);
}
/// Will be set when accessed
fn accessed(&self) -> bool {
unimplemented!()
}
fn present(&self) -> bool { self.0.flags().contains(EF::VALID) }
fn accessed(&self) -> bool { self.0.flags().contains(EF::AF) }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITE) }
fn dirty(&self) -> bool { self.hw_dirty() && self.sw_dirty() }
/// Will be set when written
fn dirty(&self) -> bool {
unimplemented!()
fn clear_accessed(&mut self) { self.as_flags().remove(EF::AF); }
fn clear_dirty(&mut self)
{
self.as_flags().remove(EF::DIRTY);
self.as_flags().insert(EF::AP_RO);
}
/// Will page-fault on an attempt to write a page with writable=0
fn writable(&self) -> bool {
unimplemented!()
fn set_writable(&mut self, value: bool)
{
self.as_flags().set(EF::AP_RO, !value);
self.as_flags().set(EF::WRITE, value);
}
/// Will page-fault on an attempt to access a page with present=0
fn present(&self) -> bool {
unimplemented!()
}
fn clear_accessed(&mut self) {
unimplemented!()
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID, value); }
fn target(&self) -> usize { self.0.addr().as_u64() as usize }
fn set_target(&mut self, target: usize) {
self.0.modify_addr(PhysAddr::new(target as u64));
}
fn clear_dirty(&mut self) {
unimplemented!()
}
fn set_writable(&mut self, value: bool) {
unimplemented!()
}
fn set_present(&mut self, value: bool) {
unimplemented!()
}
fn target(&self) -> PhysAddr {
unimplemented!()
}
fn set_target(&mut self, target: PhysAddr) {
unimplemented!()
}
// For Copy-on-write extension
fn writable_shared(&self) -> bool {
unimplemented!()
}
fn readonly_shared(&self) -> bool {
unimplemented!()
}
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::WRITABLE_SHARED) }
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::READONLY_SHARED) }
fn set_shared(&mut self, writable: bool) {
unimplemented!()
}
fn clear_shared(&mut self) {
unimplemented!()
}
// For Swap extension
fn swapped(&self) -> bool {
unimplemented!()
}
fn set_swapped(&mut self, value: bool) {
unimplemented!()
}
fn user(&self) -> bool {
unimplemented!()
}
let flags = self.as_flags();
flags.set(EF::WRITABLE_SHARED, writable);
flags.set(EF::READONLY_SHARED, !writable);
}
fn clear_shared(&mut self) { self.as_flags().remove(EF::WRITABLE_SHARED | EF::READONLY_SHARED); }
fn user(&self) -> bool { self.0.flags().contains(EF::AP_EL0) }
fn swapped(&self) -> bool { self.0.flags().contains(EF::SWAPPED) }
fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::SWAPPED, value); }
fn set_user(&mut self, value: bool) {
unimplemented!()
self.as_flags().set(EF::AP_EL0, value);
self.as_flags().set(EF::nG, value); // set non-global to use ASID
}
fn execute(&self) -> bool {
unimplemented!()
if self.user() {
!self.0.flags().contains(EF::XN)
} else {
!self.0.flags().contains(EF::PXN)
}
}
fn set_execute(&mut self, value: bool) {
unimplemented!()
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct MockFrame(PhysAddr);
impl MockFrame {
pub fn of_addr(addr: PhysAddr) -> Self {
MockFrame(addr)
}
pub fn start_address(&self) -> PhysAddr {
unimplemented!()
if self.user() {
self.as_flags().set(EF::XN, !value)
} else {
self.as_flags().set(EF::PXN, !value)
}
}
pub fn p2_index(&self) -> usize {
unimplemented!()
fn mmio(&self) -> bool {
self.0.attr().value == MairDevice::attr_value().value
}
pub fn p1_index(&self) -> usize {
unimplemented!()
fn set_mmio(&mut self, value: bool) {
if value {
self.0.modify_attr(MairDevice::attr_value())
} else {
self.0.modify_attr(MairNormal::attr_value())
}
}
pub fn number(&self) -> usize {
unimplemented!()
}
impl PageEntry {
fn read_only(&self) -> bool { self.0.flags().contains(EF::AP_RO) }
fn hw_dirty(&self) -> bool { self.writable() && !self.read_only() }
fn sw_dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
}
}
/// TODO
#[derive(Debug)]
pub struct InactivePageTable0 {
p4_frame: MockFrame,
p4_frame: Frame,
}
/// TODO
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
unsafe {
let layout = Layout::new::<u64>();
let ptr = alloc(layout);
let frame = MockFrame::of_addr(*ptr as usize);
InactivePageTable0 { p4_frame: frame }
}
// When a new InactivePageTable is created for a user MemorySet, it uses TTBR1 as its
// TTBR. The kernel TTBR (TTBR0) never changes, so we need not call map_kernel()
Self::new_bare()
}
fn new_bare() -> Self {
unimplemented!()
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value());
});
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
unimplemented!()
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default(), MairNormal::attr_value());
tlb_invalidate_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
tlb_invalidate_all();
});
}
unsafe fn activate(&self) {
unimplemented!()
let old_frame = ttbr_el1_read(0);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR0 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(0, new_frame);
tlb_invalidate_all();
}
}
unsafe fn with(&self, f: impl FnOnce()) {
unimplemented!()
// Just need to switch the user TTBR
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
f();
debug!("switch TTBR1 {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
flush_icache_all();
}
}
fn token(&self) -> usize {
0
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
fn alloc_frame() -> Option<PhysAddr> {
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: PhysAddr) {
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
@ -220,3 +266,36 @@ impl InactivePageTable for InactivePageTable0 {
alloc_stack()
}
}
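Hedged usage sketch of `with`: combined with the `MemorySet::with` forwarder added earlier in this commit, it lets kernel code briefly enter a process's user address space by switching only TTBR1 (`user_memory_set` is a hypothetical variable, for illustration only):

    unsafe {
        user_memory_set.with(|| {
            // while inside the closure, TTBR1 points at this process's page table,
            // so high-half user addresses such as USER_STACK_OFFSET resolve
        });
    }

Because the kernel lives entirely under TTBR0, no kernel mappings need to be copied into the user table, which is why `new` simply forwards to `new_bare` and `map_kernel` is unused on this path.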
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}
struct FrameAllocatorForAarch64;
impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr))
}
}
impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 {
fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u64() as usize);
}
}

@ -126,6 +126,8 @@ impl Entry for PageEntry {
fn set_user(&mut self, value: bool) { self.as_flags().set(EF::USER, value); }
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::EXECUTABLE, value); }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
impl PageEntry {

@ -138,6 +138,8 @@ impl Entry for PageEntry {
}
fn execute(&self) -> bool { !self.0.flags().contains(EF::NO_EXECUTE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
impl PageEntry {
@ -261,4 +263,4 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u64() as usize);
}
}
}

@ -131,15 +131,12 @@ mod x86_64 {
#[cfg(target_arch = "aarch64")]
mod aarch64 {
//! TODO: replace unimplemented consts with real values
const UNIMPLEMENTED: usize = 0;
pub const KERNEL_OFFSET: usize = UNIMPLEMENTED;
pub const KERNEL_PML4: usize = UNIMPLEMENTED;
pub const KERNEL_HEAP_OFFSET: usize = UNIMPLEMENTED;
pub const RECURSIVE_INDEX: usize = 0o777;
pub const KERNEL_OFFSET: usize = 0;
pub const KERNEL_PML4: usize = 0;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024;
pub const MEMORY_OFFSET: usize = UNIMPLEMENTED;
pub const MEMORY_END: usize = UNIMPLEMENTED;
pub const USER_STACK_OFFSET: usize = UNIMPLEMENTED;
pub const USER_STACK_SIZE: usize = UNIMPLEMENTED;
pub const USER32_STACK_OFFSET: usize = UNIMPLEMENTED;
pub const MEMORY_OFFSET: usize = 0;
pub const USER_STACK_OFFSET: usize = 0xffff_8000_0000_0000;
pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024;
pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
}

@ -33,6 +33,8 @@ extern crate volatile;
#[cfg(target_arch = "x86_64")]
extern crate x86_64;
extern crate xmas_elf;
#[cfg(target_arch = "aarch64")]
extern crate aarch64;
use linked_list_allocator::LockedHeap;

@ -1,5 +1,5 @@
pub use arch::paging::*;
use bit_allocator::{BitAlloc, BitAlloc4K, BitAlloc64K};
use bit_allocator::{BitAlloc, BitAlloc4K, BitAlloc64K, BitAlloc1M};
use consts::MEMORY_OFFSET;
use spin::{Mutex, MutexGuard};
use super::HEAP_ALLOCATOR;
@ -19,7 +19,7 @@ pub type FrameAlloc = BitAlloc4K;
// Raspberry Pi 3 has 1G memory
#[cfg(target_arch = "aarch64")]
pub type FrameAlloc = BitAlloc64K;
pub type FrameAlloc = BitAlloc1M;
lazy_static! {
pub static ref FRAME_ALLOCATOR: Mutex<FrameAlloc> = Mutex::new(FrameAlloc::default());
@ -76,4 +76,4 @@ pub fn init_heap() {
// use ucore_memory::cow::test::test_with;
// test_with(&mut active_table());
// }
//}
//}

@ -142,6 +142,10 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
};
#[cfg(target_arch = "aarch64")]
assert_eq!((virt_addr >> 48), 0xffff, "Segment Fault");
set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
}
set
