aarch64: enable MMU in bootloader

master · equation314, 6 years ago · parent 8bb1e65ba7 · commit 0e38439fb8

@@ -3,7 +3,7 @@
[[package]]
name = "aarch64"
version = "2.2.2"
source = "git+https://github.com/equation314/aarch64#b6a0f4a3be6f74927c88305a6af5ad2be079bccd"
source = "git+https://github.com/equation314/aarch64#ad81f8f0ebd6fed15b2b0696f5d1b566d36f1172"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -90,6 +90,18 @@ set_stack:
# set the current stack pointer
mov sp, x1
goto_main:
zero_bss:
# load the start address and number of bytes in BSS section
ldr x1, =_sbss
ldr x2, =_ebss
zero_bss_loop:
# zero out the BSS section, 64 bits at a time
cmp x1, x2
b.ge zero_bss_loop_end
str xzr, [x1], #8
b zero_bss_loop
zero_bss_loop_end:
bl boot_main
b halt
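For reference, a minimal Rust sketch (not part of the commit) of what this loop does; `_sbss` and `_ebss` are the symbols added to the bootloader linker script below, and treating them as `u64` assumes the region is 8-byte aligned, which the 4 KiB alignment in the script guarantees:

    // Hedged sketch: the Rust equivalent of zero_bss above.
    extern "C" {
        static mut _sbss: u64; // first word of .bss (only the address matters)
        static mut _ebss: u64; // first word past the end of .bss
    }

    unsafe fn zero_bss() {
        let mut p = &mut _sbss as *mut u64;
        let end = &mut _ebss as *mut u64;
        while p < end {
            p.write_volatile(0); // mirrors `str xzr, [x1], #8`
            p = p.add(1);
        }
    }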

@@ -14,10 +14,18 @@ SECTIONS {
. = ALIGN(4K);
}
.data : {
*(.data .data.* .gnu.linkonce.d*)
. = ALIGN(4K);
}
.bss : {
_sbss = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(4K);
_ebss = .;
}
/DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) }
}

@@ -1,24 +1,130 @@
use core::slice;
use aarch64::addr::{VirtAddr, PhysAddr};
use aarch64::paging::{memory_attribute::*, Page, PageTable, PageTableFlags as EF, PhysFrame};
use aarch64::paging::{Size4KiB, Size2MiB, Size1GiB};
use aarch64::{asm::*, barrier, regs::*};
use core::ptr;
use fixedvec::FixedVec;
use xmas_elf::program::{ProgramHeader64, Type};
const PAGE_SIZE: usize = 4096;
const ALIGN_2MB: u64 = 0x200000;
const IO_REMAP_BASE: u64 = 0x3F00_0000;
const MEMORY_END: u64 = 0x4000_0000;
const RECURSIVE_INDEX: usize = 0o777;
const KERNEL_OFFSET: u64 = 0x0000_0000_0000_0000;
global_asm!(include_str!("boot.S"));
pub fn map_kernel(kernel_start: u64, segments: &FixedVec<ProgramHeader64>) {
fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64) {
#[repr(align(4096))]
struct PageData([u8; PAGE_SIZE]);
static mut PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]);
static mut PAGE_TABLE_LVL3: PageData = PageData([0; PAGE_SIZE]);
static mut PAGE_TABLE_LVL2: PageData = PageData([0; PAGE_SIZE]);
let frame_lvl4 = unsafe { PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(&PAGE_TABLE_LVL4 as *const _ as u64)) };
let frame_lvl3 = unsafe { PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(&PAGE_TABLE_LVL3 as *const _ as u64)) };
let frame_lvl2 = unsafe { PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(&PAGE_TABLE_LVL2 as *const _ as u64)) };
let p4 = unsafe { &mut *(frame_lvl4.start_address().as_u64() as *mut PageTable) };
let p3 = unsafe { &mut *(frame_lvl3.start_address().as_u64() as *mut PageTable) };
let p2 = unsafe { &mut *(frame_lvl2.start_address().as_u64() as *mut PageTable) };
p4.zero();
p3.zero();
p2.zero();
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::UXN;
// normal memory
for page in Page::<Size2MiB>::range_of(start_vaddr.as_u64(), end_vaddr.as_u64()) {
let paddr = PhysAddr::new(page.start_address().as_u64().wrapping_add(offset));
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
}
// device memory
for page in Page::<Size2MiB>::range_of(IO_REMAP_BASE, MEMORY_END) {
let paddr = PhysAddr::new(page.start_address().as_u64());
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
}
p3[0].set_frame(frame_lvl2, EF::default(), MairNormal::attr_value());
p3[1].set_block::<Size1GiB>(PhysAddr::new(MEMORY_END), block_flags | EF::PXN, MairDevice::attr_value());
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
ttbr_el1_write(VirtAddr::new(KERNEL_OFFSET).va_range().unwrap() as u8, frame_lvl4);
tlb_invalidate_all();
}
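A quick check of the index arithmetic used above (an illustrative sketch, not part of the commit): with 4 KiB granules the level-2 index of a virtual address is bits [29:21], so one `p2` table of 512 entries covers the 1 GiB that `p3[0]` points at, and the recursive slot is simply the last entry of `p4`.

    // Hedged sketch: the index math behind the 2 MiB block mappings above.
    const fn p2_index(va: u64) -> usize {
        ((va >> 21) & 0x1ff) as usize // bits [29:21]: 512 entries x 2 MiB = 1 GiB
    }

    fn main() {
        assert_eq!(p2_index(0x0010_0000), 0);   // kernel image at 1 MiB -> block 0
        assert_eq!(p2_index(0x3F00_0000), 504); // IO_REMAP_BASE -> first device block
        assert_eq!(p2_index(0x3FFF_FFFF), 511); // last block below MEMORY_END (1 GiB)
        assert_eq!(0o777, 511);                 // RECURSIVE_INDEX is the top p4 entry
    }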
fn enable_mmu() {
MAIR_EL1.write(
MAIR_EL1::Attr0.val(MairNormal::config_value()) +
MAIR_EL1::Attr1.val(MairDevice::config_value()) +
MAIR_EL1::Attr2.val(MairNormalNonCacheable::config_value()),
);
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI1::Ignored +
TCR_EL1::TBI0::Ignored +
TCR_EL1::AS::Bits_16 +
TCR_EL1::IPS.val(ips) +
TCR_EL1::TG1::KiB_4 +
TCR_EL1::SH1::Inner +
TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD1::EnableTTBR1Walks +
TCR_EL1::A1::UseTTBR1ASID +
TCR_EL1::T1SZ.val(16) +
TCR_EL1::TG0::KiB_4 +
TCR_EL1::SH0::Inner +
TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD0::EnableTTBR0Walks +
TCR_EL1::T0SZ.val(16),
);
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
unsafe { barrier::isb(barrier::SY) }
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction
unsafe { barrier::isb(barrier::SY) }
}
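A worked reading of the TCR_EL1 configuration above: T0SZ = T1SZ = 16 gives 64 - 16 = 48-bit address ranges, so TTBR0_EL1 translates 0x0000_0000_0000_0000..=0x0000_FFFF_FFFF_FFFF and TTBR1_EL1 translates 0xFFFF_0000_0000_0000..=0xFFFF_FFFF_FFFF_FFFF, each with 4 KiB granules and a four-level walk. Since KERNEL_OFFSET is 0 in this bootloader, the `va_range()` call in setup_temp_page_table selects the low half, i.e. the same TTBR0_EL1 slot the old kernel-side code wrote explicitly with ttbr_el1_write(0, frame_lvl4).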
pub fn map_kernel(kernel_start: usize, segments: &FixedVec<ProgramHeader64>) {
let (mut start_vaddr, mut end_vaddr) = (VirtAddr::new(core::u64::MAX), VirtAddr::zero());
for segment in segments {
if segment.get_type() != Ok(Type::Load) {
continue;
}
let virt_addr = segment.virtual_addr;
let offset = segment.offset;
let file_size = segment.file_size as usize;
let mem_size = segment.mem_size as usize;
let file_size = segment.file_size;
let mem_size = segment.mem_size;
unsafe {
let target = slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size);
let source = slice::from_raw_parts((kernel_start + offset) as *const u8, file_size);
target.copy_from_slice(source);
target[file_size..].iter_mut().for_each(|x| *x = 0);
let src = (kernel_start as u64 + offset) as *const u8;
let dst = virt_addr as *mut u8;
ptr::copy(src, dst, file_size as usize);
ptr::write_bytes((virt_addr + file_size) as *mut u8, 0, (mem_size - file_size) as usize);
}
if virt_addr < start_vaddr.as_u64() {
start_vaddr = VirtAddr::new(virt_addr).align_down(ALIGN_2MB);
}
if virt_addr + mem_size > end_vaddr.as_u64() {
end_vaddr = VirtAddr::new(virt_addr + mem_size).align_up(ALIGN_2MB);
}
}
setup_temp_page_table(start_vaddr, end_vaddr, KERNEL_OFFSET.wrapping_neg());
enable_mmu();
}
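To make the 2 MiB rounding above concrete (illustrative figures, not taken from a real kernel image): a LOAD segment at virt_addr = 0x10_0000 with mem_size = 0x34_7000 gives start_vaddr = align_down(0x10_0000) = 0x0 and end_vaddr = align_up(0x44_7000) = 0x60_0000, so the normal-memory loop in setup_temp_page_table writes three 2 MiB block entries. Because KERNEL_OFFSET is 0, the offset passed in is KERNEL_OFFSET.wrapping_neg() = 0 and the resulting mapping is an identity map.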

@@ -46,11 +46,10 @@ pub extern "C" fn boot_main() -> ! {
}
}
arch::map_kernel(_kernel_payload_start as u64, &segments);
let entry = kernel_elf.header.pt2.entry_point();
let kernel_main: extern "C" fn() = unsafe { transmute(entry) };
arch::map_kernel(_kernel_payload_start as usize, &segments);
kernel_main();
loop {}
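For context, a hedged reconstruction of how boot_main might gather the `segments` it passes to map_kernel; the helper name and the caller-provided `space` buffer are assumptions, and only `kernel_elf`, `segments`, and the map_kernel call are taken from the surrounding diff (map_kernel itself filters for Type::Load, so every Ph64 header can be pushed here):

    // Hedged sketch: collect 64-bit program headers for map_kernel.
    use fixedvec::FixedVec;
    use xmas_elf::program::{ProgramHeader, ProgramHeader64};
    use xmas_elf::ElfFile;

    fn collect_segments<'a>(
        kernel_elf: &ElfFile,
        space: &'a mut [ProgramHeader64],
    ) -> FixedVec<'a, ProgramHeader64> {
        let mut segments = FixedVec::new(space);
        for ph in kernel_elf.program_iter() {
            if let ProgramHeader::Ph64(header) = ph {
                segments.push(*header).expect("too many program headers");
            }
        }
        segments
    }

boot_main then calls arch::map_kernel(_kernel_payload_start as usize, &segments) and jumps to the ELF entry point through the transmute shown above.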

kernel/Cargo.lock (generated, 2 lines changed)

@@ -3,7 +3,7 @@
[[package]]
name = "aarch64"
version = "2.2.2"
source = "git+https://github.com/equation314/aarch64#b6a0f4a3be6f74927c88305a6af5ad2be079bccd"
source = "git+https://github.com/equation314/aarch64#ad81f8f0ebd6fed15b2b0696f5d1b566d36f1172"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -4,18 +4,4 @@
_start:
ldr x0, =bootstacktop
mov sp, x0
zero_bss:
# load the start address and number of bytes in BSS section
ldr x1, =sbss
ldr x2, =__bss_length
zero_bss_loop:
# zero out the BSS section, 64 bits at a time
cbz x2, zero_bss_loop_end
str xzr, [x1], #8
sub x2, x2, #8
cbnz x2, zero_bss_loop
zero_bss_loop_end:
bl rust_main

@@ -1,7 +1,7 @@
ENTRY(_start)
SECTIONS {
. = 0x2000000; /* Load the kernel at this address. It's also kernel stack top address */
. = 0x100000; /* Load the kernel at this address. It's also kernel stack top address */
bootstacktop = .;
.text : {
@@ -26,7 +26,7 @@ SECTIONS {
edata = .;
}
.bss (NOLOAD) : {
.bss : {
. = ALIGN(32);
sbss = .;
*(.bss .bss.*)

@@ -2,8 +2,7 @@
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use super::paging::MMIOType;
use aarch64::paging::{memory_attribute::*, PhysFrame as Frame};
use aarch64::{addr::*, barrier, regs::*};
use aarch64::regs::*;
use atags::atags::Atags;
use log::*;
use rcore_memory::PAGE_SIZE;
@@ -16,62 +15,6 @@ pub fn init() {
info!("memory: init end");
}
/// initialize temporary paging and enable mmu immediately after boot. Serial port is disabled at this time.
pub fn init_mmu_early() {
#[repr(align(4096))]
struct PageData([u8; PAGE_SIZE]);
static PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]);
static PAGE_TABLE_LVL3: PageData = PageData([0; PAGE_SIZE]);
static PAGE_TABLE_LVL2: PageData = PageData([0; PAGE_SIZE]);
let frame_lvl4 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL4 as *const _ as u64));
let frame_lvl3 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL3 as *const _ as u64));
let frame_lvl2 = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_LVL2 as *const _ as u64));
super::paging::setup_temp_page_table(frame_lvl4, frame_lvl3, frame_lvl2);
// device.
MAIR_EL1.write(
MAIR_EL1::Attr0.val(MairNormal::config_value()) +
MAIR_EL1::Attr1.val(MairDevice::config_value()) +
MAIR_EL1::Attr2.val(MairNormalNonCacheable::config_value()),
);
// Configure various settings of stage 1 of the EL1 translation regime.
let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
TCR_EL1.write(
TCR_EL1::TBI1::Ignored +
TCR_EL1::TBI0::Ignored +
TCR_EL1::AS::Bits_16 +
TCR_EL1::IPS.val(ips) +
TCR_EL1::TG1::KiB_4 +
TCR_EL1::SH1::Inner +
TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD1::EnableTTBR1Walks +
TCR_EL1::A1::UseTTBR1ASID +
TCR_EL1::T1SZ.val(16) +
TCR_EL1::TG0::KiB_4 +
TCR_EL1::SH0::Inner +
TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable +
TCR_EL1::EPD0::EnableTTBR0Walks +
TCR_EL1::T0SZ.val(16),
);
// Switch the MMU on.
//
// First, force all previous changes to be seen before the MMU is enabled.
unsafe { barrier::isb(barrier::SY) }
// Enable the MMU and turn on data and instruction caching.
SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);
// Force MMU init to complete before next instruction
unsafe { barrier::isb(barrier::SY) }
}
fn init_frame_allocator() {
use crate::consts::MEMORY_OFFSET;
use bit_allocator::BitAlloc;

@@ -17,7 +17,6 @@ global_asm!(include_str!("boot/entry.S"));
/// The entry point of kernel
#[no_mangle] // don't mangle the name of this function
pub extern "C" fn rust_main() -> ! {
memory::init_mmu_early(); // Enable mmu and paging
board::init_serial_early();
crate::logging::init();

@@ -3,45 +3,13 @@ use rcore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB, Size2MiB, Size1GiB};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB};
use aarch64::paging::memory_attribute::*;
use log::*;
// Depends on kernel
use crate::consts::{KERNEL_PML4, RECURSIVE_INDEX};
use crate::memory::{active_table, alloc_frame, dealloc_frame};
// need 3 page
pub fn setup_temp_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame) {
let p4 = unsafe { &mut *(frame_lvl4.start_address().as_u64() as *mut Aarch64PageTable) };
let p3 = unsafe { &mut *(frame_lvl3.start_address().as_u64() as *mut Aarch64PageTable) };
let p2 = unsafe { &mut *(frame_lvl2.start_address().as_u64() as *mut Aarch64PageTable) };
p4.zero();
p3.zero();
p2.zero();
let (start_addr, end_addr) = (0, 0x40000000);
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::UXN;
for page in Page::<Size2MiB>::range_of(start_addr, end_addr) {
let paddr = PhysAddr::new(page.start_address().as_u64());
use super::board::IO_REMAP_BASE;
if paddr.as_u64() >= IO_REMAP_BASE as u64 {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
} else {
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
}
}
p3[0].set_frame(frame_lvl2, EF::default(), MairNormal::attr_value());
p3[1].set_block::<Size1GiB>(PhysAddr::new(0x40000000), block_flags | EF::PXN, MairDevice::attr_value());
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
ttbr_el1_write(0, frame_lvl4);
tlb_invalidate_all();
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
@@ -50,13 +18,13 @@ impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
self.0.map_to(Page::of_addr(addr as u64), Frame::of_addr(target as u64), flags, attr, &mut FrameAllocatorForAarch64)
.unwrap().flush();
self.get_entry(addr).expect("fail to get entry")
}
fn unmap(&mut self, addr: usize) {
let (_frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
let (_frame, flush) = self.0.unmap(Page::of_addr(addr as u64)).unwrap();
flush.flush();
}
@@ -191,7 +159,7 @@ impl InactivePageTable for InactivePageTable0 {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
let frame = Frame::of_addr(target as u64);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
@@ -274,7 +242,7 @@ struct FrameAllocatorForAarch64;
impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr))
alloc_frame().map(|addr| Frame::of_addr(addr as u64))
}
}
