diff --git a/crate/aarch64/src/paging/page_table.rs b/crate/aarch64/src/paging/page_table.rs
index b074768..1525b17 100644
--- a/crate/aarch64/src/paging/page_table.rs
+++ b/crate/aarch64/src/paging/page_table.rs
@@ -104,11 +104,10 @@ bitflags! {
         const SHARED = 3 << 8;      /* SH[1:0], inner shareable */
         const BIT_8 = 1 << 8;
         const BIT_9 = 1 << 9;
-        /*
-        pub const ATTRIB_SH_NON_SHAREABLE: usize = 0x0 << 8;
-        pub const ATTRIB_SH_OUTER_SHAREABLE: usize = 0x2 << 8;
-        pub const ATTRIB_SH_INNER_SHAREABLE: usize = 0x3 << 8;
-        */
+
+        // pub const ATTRIB_SH_NON_SHAREABLE: usize = 0x0 << 8;
+        const OUTER_SHAREABLE = 0b10 << 8;
+        const INNER_SHAREABLE = 0b11 << 8;
         const ACCESSED = 1 << 10;   /* AF, Access Flag */
         const NONE_GLOBAL = 1 << 11;    /* None Global */
@@ -148,6 +147,13 @@ impl PageTable {
             entry.set_unused();
         }
     }
+
+    /// Setup identity map: VirtPage at pagenumber -> PhysFrame at pagenumber
+    /// pn: pagenumber = addr>>12 in riscv32.
+    pub fn map_identity(&mut self, p4num: usize, flags: PageTableFlags) {
+        let entry = self.entries[p4num].clone();
+        self.entries[p4num].set_addr(entry.addr(), flags);
+    }
 }

 impl Index<usize> for PageTable {
diff --git a/kernel/src/arch/aarch64/memory.rs b/kernel/src/arch/aarch64/memory.rs
index 32990d2..126cf57 100644
--- a/kernel/src/arch/aarch64/memory.rs
+++ b/kernel/src/arch/aarch64/memory.rs
@@ -5,7 +5,8 @@ use ucore_memory::PAGE_SIZE;
 use memory::{FRAME_ALLOCATOR, init_heap};
 use super::atags::atags::Atags;
 //use super::super::HEAP_ALLOCATOR;
-use aarch64::{barrier, regs::*};
+use aarch64::{barrier, regs::*, addr::*};
+use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
 use core::ops::Range;

 /// Memory initialization.
@@ -15,9 +16,27 @@ pub fn init() {
     HEAP_ALLOCATOR.lock().init(start, end - start);
     }*/

+
+
+    #[repr(align(4096))]
+    struct PageData([u8; PAGE_SIZE]);
+    static PAGE_TABLE_ROOT: PageData = PageData([0; PAGE_SIZE]);
+
+    let frame = Frame::containing_address(PhysAddr::new(&PAGE_TABLE_ROOT as *const _ as u64));
+    super::paging::setup_page_table(frame);
+
+    init_mmu();
+
     init_frame_allocator();
     init_heap();
-    init_mmu();
+
+    let (start, end) = memory_map().expect("failed to find memory map");
+    let mut v = vec![];
+    for i in 0..(20 + (start & 0xf)) {
+        v.push(i);
+        println!("{:x?} {:x?}", &v[i] as * const _ as usize, v);
+    }
+
 }

 extern "C" {
@@ -26,19 +45,30 @@

 fn init_frame_allocator() {
-    let mut ba = FRAME_ALLOCATOR.lock();
+    use consts::{MEMORY_OFFSET};
     let (start, end) = memory_map().expect("failed to find memory map");
+    info!("{:x?} {:x?}", start, end);
+
+    let mut ba = FRAME_ALLOCATOR.lock();
+    use core::mem::size_of;
+    use ::memory::FrameAlloc;
+    info!("{:x?} {:x?}", &FRAME_ALLOCATOR as *const _ as usize, size_of::());
+    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
+    info!("{:x?} {:x?}", KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, end);
     ba.insert(to_range(start, end));
     info!("FrameAllocator init end");

     fn to_range(start: usize, end: usize) -> Range<usize> {
-        let page_start = start / PAGE_SIZE;
-        let page_end = (end - 1) / PAGE_SIZE + 1;
+        let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
+        let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
+        // info!("{:x?} {:x?}", page_start, page_end);
         page_start..page_end
     }
 }

 fn init_mmu() {
+    info!("init_mmu");
+    // device.
     MAIR_EL1.write(
         // Attribute 1
@@ -50,6 +80,7 @@
     );
     // Configure various settings of stage 1 of the EL1 translation regime.
     let ips = ID_AA64MMFR0_EL1.read(ID_AA64MMFR0_EL1::PARange);
+    info!("{:x?}", ips);
     TCR_EL1.write(
         TCR_EL1::TBI0::Ignored
             + TCR_EL1::IPS.val(ips)
@@ -58,7 +89,7 @@
             + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
             + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable
             + TCR_EL1::EPD0::EnableTTBR0Walks
-            + TCR_EL1::T0SZ.val(34), // Start walks at level 2
+            + TCR_EL1::T0SZ.val(16), // Start walks at level 2
     );

     // Switch the MMU on.
@@ -66,11 +97,14 @@
     // First, force all previous changes to be seen before the MMU is enabled.
     unsafe { barrier::isb(barrier::SY); }
+    info!("{:x?}", TCR_EL1.get());

     // Enable the MMU and turn on data and instruction caching.
     SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable);

     // Force MMU init to complete before next instruction
     unsafe { barrier::isb(barrier::SY); }
+
+    info!("mmu enabled!");
 }

 /// Returns the (start address, end address) of the available memory on this
diff --git a/kernel/src/arch/aarch64/paging.rs b/kernel/src/arch/aarch64/paging.rs
index b468f8b..603346d 100644
--- a/kernel/src/arch/aarch64/paging.rs
+++ b/kernel/src/arch/aarch64/paging.rs
@@ -11,6 +11,32 @@ use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, Pag
 use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
 use aarch64::{regs::*};

+// need 1 page
+pub fn setup_page_table(frame: Frame) {
+    let p4 = unsafe { &mut *(frame.start_address().as_u64() as *mut Aarch64PageTable) };
+    p4.zero();
+
+
+    // p4.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
+
+    // Set kernel identity map
+    // 0x10000000 ~ 1K area
+    p4.map_identity(0o777, EF::PRESENT | EF::PXN | EF::UXN);
+
+    // 0x80000000 ~ 8K area
+    p4.map_identity(0, EF::PRESENT);
+    // p2.map_identity(KERNEL_PML4, EF::PRESENT | EF::READABLE | EF::WRITABLE);
+    // p2.map_identity(KERNEL_PML4 + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
+
+    // use super::riscv::register::satp;
+    // unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
+    // sfence_vma_all();
+
+    ttbr0_el1_write(frame);
+    tlb_invalidate();
+    info!("setup init page table end");
+}
+
 pub trait PageExt {
     fn of_addr(address: usize) -> Self;
     fn range_of(begin: usize, end: usize) -> PageRange;
@@ -75,7 +101,7 @@ impl PageTable for ActivePageTable {

 impl ActivePageTable {
     pub unsafe fn new() -> Self {
-        ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
+        ActivePageTable(RecursivePageTable::new(&mut *(0xffff_ffff_ffff_f000 as *mut _)).unwrap())
     }
     fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
         // Create a temporary page
@@ -238,7 +264,7 @@ impl InactivePageTable for InactivePageTable0 {

 impl InactivePageTable0 {
     fn map_kernel(&mut self) {
-        let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut Aarch64PageTable) };
+        let mut table = unsafe { &mut *(0xffff_ffff_ffff_f000 as *mut Aarch64PageTable) };
         // Kernel at 0xffff_ff00_0000_0000
         // Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
         let e0 = table[0].clone();
@@ -268,4 +294,4 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 {
     fn dealloc(&mut self, frame: Frame) {
         dealloc_frame(frame.start_address().as_u64() as usize);
     }
-}
\ No newline at end of file
+}
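A quick note on the two new top-level entries and the T0SZ change, as a sanity check rather than part of the patch: with the 4 KiB granule, `TCR_EL1::T0SZ.val(16)` gives TTBR0 a 48-bit (64 - 16) virtual address space and the table walk starts at level 0, so the index passed to `map_identity` is bits [47:39] of the virtual address; the previous value 34 gave a 30-bit space whose walk began at level 2, which is what the now-stale `// Start walks at level 2` comment was describing. A minimal sketch of the index arithmetic (`p4_index` is a hypothetical helper, not code from this repository):

```rust
/// Hypothetical helper: top-level (level 0) table index of a virtual address
/// under the 4 KiB granule with T0SZ = 16 (48-bit VA, 9 bits per table level).
fn p4_index(vaddr: u64) -> usize {
    ((vaddr >> 39) & 0o777) as usize // 512 entries per table, mask 0o777 = 511
}

fn main() {
    // Everything below 512 GiB (RAM and peripheral windows) falls into entry 0,
    // which is what `p4.map_identity(0, EF::PRESENT)` covers.
    assert_eq!(p4_index(0x8000_0000), 0);
    // The last slot of the lower range starts at 511 << 39 = 0xFF80_0000_0000,
    // corresponding to `p4.map_identity(0o777, ...)`.
    assert_eq!(p4_index(0xFF80_0000_0000), 0o777);
}
```

The sketch only covers the index math; it says nothing about which entry kinds (table vs. block) are legal at level 0, which is governed by the architecture, not by this patch.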