aarch64/mmu: remap kernel memory ranges at the end of memory::init()

toolchain_update
equation314 6 years ago
parent 99c5b3c3f8
commit a9de99d3a9

@@ -18,7 +18,7 @@ mod frame_alloc;
mod page_table;
mod recursive;
/// Trait for abstracting over the three possible page sizes on x86_64, 4KiB, 2MiB, 1GiB.
/// Trait for abstracting over the three possible block/page sizes on aarch64, 4KiB, 2MiB, 1GiB.
pub trait PageSize: Copy + Eq + PartialOrd + Ord {
/// The page size in bytes.
const SIZE: u64;
@@ -34,6 +34,14 @@ pub trait NotGiantPageSize: PageSize {}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size4KiB {}
/// A “huge” 2MiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size2MiB {}
/// A “giant” 1GiB page.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Size1GiB {}
impl PageSize for Size4KiB {
const SIZE: u64 = 4096;
const SIZE_AS_DEBUG_STR: &'static str = "4KiB";
@@ -41,6 +49,18 @@ impl PageSize for Size4KiB {
impl NotGiantPageSize for Size4KiB {}
impl PageSize for Size2MiB {
const SIZE: u64 = Size4KiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "2MiB";
}
impl NotGiantPageSize for Size2MiB {}
impl PageSize for Size1GiB {
const SIZE: u64 = Size2MiB::SIZE * 512;
const SIZE_AS_DEBUG_STR: &'static str = "1GiB";
}
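
For orientation, a minimal sketch (not part of this commit) of how the three sizes compose and how the trait is used generically; `pages_needed` and `size_sanity` are hypothetical helpers:

```rust
fn pages_needed<S: PageSize>(bytes: u64) -> u64 {
    // round up to a whole number of S-sized pages
    (bytes + S::SIZE - 1) / S::SIZE
}

fn size_sanity() {
    // 1GiB = 512 * 2MiB and 2MiB = 512 * 4KiB, matching the impls above
    assert_eq!(Size1GiB::SIZE, 512 * Size2MiB::SIZE);
    assert_eq!(Size2MiB::SIZE, 512 * Size4KiB::SIZE);
    assert_eq!(pages_needed::<Size4KiB>(10_000), 3);
}
```
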
/// A virtual memory page.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[repr(C)]
@@ -97,6 +117,14 @@ impl<S: PageSize> Page<S> {
pub fn range_inclusive(start: Self, end: Self) -> PageRangeInclusive<S> {
PageRangeInclusive { start, end }
}
pub fn of_addr(address: usize) -> Self {
Self::containing_address(VirtAddr::new(address as u64))
}
pub fn range_of(begin: usize, end: usize) -> PageRange<S> {
Self::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
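
A hypothetical caller of the new helpers: `range_of` takes half-open byte bounds and rounds the end up to the next page boundary, so every page overlapping the range is yielded.

```rust
fn walk(begin: usize, end: usize) {
    for page in Page::<Size4KiB>::range_of(begin, end) {
        // e.g. range_of(0x1000, 0x2a00) yields the pages at 0x1000 and 0x2000
        let _va = page.start_address();
    }
}
```
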
impl<S: NotGiantPageSize> Page<S> {
@@ -106,6 +134,31 @@ impl<S: NotGiantPageSize> Page<S> {
}
}
impl Page<Size1GiB> {
/// Returns the 1GiB memory page with the specified page table indices.
pub fn from_page_table_indices_1gib(p4_index: u9, p3_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
Page::containing_address(VirtAddr::new(addr))
}
}
impl Page<Size2MiB> {
/// Returns the 2MiB memory page with the specified page table indices.
pub fn from_page_table_indices_2mib(p4_index: u9, p3_index: u9, p2_index: u9) -> Self {
use bit_field::BitField;
let mut addr = 0;
addr.set_bits(39..48, u64::from(p4_index));
addr.set_bits(30..39, u64::from(p3_index));
addr.set_bits(21..30, u64::from(p2_index));
Page::containing_address(VirtAddr::new(addr))
}
}
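
The bit ranges mirror the 4-level, 48-bit translation layout: each level selects 9 bits of the virtual address, hence the `u9` index type. A small sketch of the arithmetic, assuming the `ux` crate's `u9`:

```rust
fn index_math() {
    // p4 occupies bits 39..48 and p3 bits 30..39 of the virtual address
    let page = Page::from_page_table_indices_1gib(u9::new(1), u9::new(2));
    assert_eq!(page.start_address().as_u64(), (1u64 << 39) | (2u64 << 30));
}
```
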
impl Page<Size4KiB> {
/// Returns the 4KiB memory page with the specified page table indices.
pub fn from_page_table_indices(p4_index: u9, p3_index: u9, p2_index: u9, p1_index: u9) -> Self {
@@ -199,6 +252,16 @@ impl<S: PageSize> Iterator for PageRange<S> {
}
}
impl PageRange<Size2MiB> {
/// Converts the range of 2MiB pages to a range of 4KiB pages.
pub fn as_4kib_page_range(self) -> PageRange<Size4KiB> {
PageRange {
start: Page::containing_address(self.start.start_address()),
end: Page::containing_address(self.end.start_address()),
}
}
}
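
Since a 2MiB block covers 512 4KiB pages, the conversion preserves the byte range while refining the granularity; a hedged example:

```rust
fn split() {
    let big = Page::<Size2MiB>::range_of(0x4000_0000, 0x4020_0000); // one 2MiB page
    let small = big.as_4kib_page_range();
    assert_eq!(small.count(), 512); // same bytes, finer granularity
}
```
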
impl<S: PageSize> fmt::Debug for PageRange<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("PageRange")
@@ -294,6 +357,10 @@ impl<S: PageSize> PhysFrame<S> {
pub fn range_inclusive(start: PhysFrame<S>, end: PhysFrame<S>) -> PhysFrameRangeInclusive<S> {
PhysFrameRangeInclusive { start, end }
}
pub fn of_addr(address: usize) -> Self {
Self::containing_address(PhysAddr::new(address as u64))
}
}
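
`PhysFrame::of_addr` is the physical-side twin of `Page::of_addr`; for example:

```rust
fn frame_of_mmio() {
    let frame: PhysFrame<Size4KiB> = PhysFrame::of_addr(0x3F00_0000);
    assert_eq!(frame.start_address().as_u64(), 0x3F00_0000);
}
```
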
impl<S: PageSize> fmt::Debug for PhysFrame<S> {

@@ -239,6 +239,45 @@ impl<'a> RecursivePageTable<'a> {
inner(entry, next_table_page, allocator)
}
pub fn p3_ptr<S: PageSize>(&self, page: Page<S>) -> *mut PageTable {
self.p3_page(page).start_address().as_mut_ptr()
}
pub fn p2_ptr<S: NotGiantPageSize>(&self, page: Page<S>) -> *mut PageTable {
self.p2_page(page).start_address().as_mut_ptr()
}
pub fn p1_ptr(&self, page: Page<Size4KiB>) -> *mut PageTable {
self.p1_page(page).start_address().as_mut_ptr()
}
fn p3_page<S: PageSize>(&self, page: Page<S>) -> Page {
Page::from_page_table_indices(
self.recursive_index,
self.recursive_index,
self.recursive_index,
page.p4_index(),
)
}
fn p2_page<S: NotGiantPageSize>(&self, page: Page<S>) -> Page {
Page::from_page_table_indices(
self.recursive_index,
self.recursive_index,
page.p4_index(),
page.p3_index(),
)
}
fn p1_page(&self, page: Page<Size4KiB>) -> Page {
Page::from_page_table_indices(
self.recursive_index,
page.p4_index(),
page.p3_index(),
page.p2_index(),
)
}
}
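
These helpers encode the recursive-mapping trick: following the recursive entry once, twice, or three times exposes the P3/P2/P1 tables of any page in virtual memory. A sketch, assuming the recursive entry occupies the last slot (index 511 is a hypothetical choice):

```rust
fn p1_table_va(page: Page<Size4KiB>) -> VirtAddr {
    let r = u9::new(511); // hypothetical recursive index
    // one pass through the recursive entry, then the page's own upper indices
    Page::from_page_table_indices(r, page.p4_index(), page.p3_index(), page.p2_index())
        .start_address()
}
```
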
@@ -253,15 +292,16 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
where
A: FrameAllocator<Size4KiB>,
{
let p4 = &mut self.p4;
// aliasing workaround: a second &mut Self lets the p*_page helpers run while `p4` is mutably borrowed
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
let p3_page = p3_page(page, self.recursive_index);
let p3_page = self.p3_page(page);
let p3 = unsafe { Self::create_next_table(&mut p4[page.p4_index()], p3_page, allocator)? };
let p2_page = p2_page(page, self.recursive_index);
let p2_page = self.p2_page(page);
let p2 = unsafe { Self::create_next_table(&mut p3[page.p3_index()], p2_page, allocator)? };
let p1_page = p1_page(page, self.recursive_index);
let p1_page = self.p1_page(page);
let p1 = unsafe { Self::create_next_table(&mut p2[page.p2_index()], p1_page, allocator)? };
if !p1[page.p1_index()].is_unused() {
@@ -276,28 +316,30 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
&mut self,
page: Page<Size4KiB>,
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
let p4 = &mut self.p4;
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3 = unsafe { &mut *(self.p3_ptr(page)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2 = unsafe { &mut *(self.p2_ptr(page)) };
let p2_entry = &p2[page.p2_index()];
p2_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1 = unsafe { &mut *(self.p1_ptr(page)) };
let p1_entry = &mut p1[page.p1_index()];
let frame = p1_entry.frame().map_err(|err| match err {
@@ -314,25 +356,26 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
page: Page<Size4KiB>,
flags: PageTableFlags,
) -> Result<MapperFlush<Size4KiB>, FlagUpdateError> {
let p4 = &mut self.p4;
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
if p4[page.p4_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3 = unsafe { &mut *(self.p3_ptr(page)) };
if p3[page.p3_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2 = unsafe { &mut *(self.p2_ptr(page)) };
if p2[page.p2_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
}
let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1 = unsafe { &mut *(self.p1_ptr(page)) };
if p1[page.p1_index()].is_unused() {
return Err(FlagUpdateError::PageNotMapped);
@@ -344,27 +387,28 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
}
fn translate_page(&self, page: Page<Size4KiB>) -> Option<PhysFrame<Size4KiB>> {
let p4 = &self.p4;
let self_mut = unsafe { &mut *(self as *const _ as *mut Self) };
let p4 = &mut self_mut.p4;
if p4[page.p4_index()].is_unused() {
return None;
}
let p3 = unsafe { &*(p3_ptr(page, self.recursive_index)) };
let p3 = unsafe { &*(self.p3_ptr(page)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.is_unused() {
return None;
}
let p2 = unsafe { &*(p2_ptr(page, self.recursive_index)) };
let p2 = unsafe { &*(self.p2_ptr(page)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.is_unused() {
return None;
}
let p1 = unsafe { &*(p1_ptr(page, self.recursive_index)) };
let p1 = unsafe { &*(self.p1_ptr(page)) };
let p1_entry = &p1[page.p1_index()];
if p1_entry.is_unused() {
@@ -374,42 +418,3 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
PhysFrame::from_start_address(p1_entry.addr()).ok()
}
}
fn p3_ptr<S: PageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p3_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p3_page<S: PageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
recursive_index,
page.p4_index(),
)
}
fn p2_ptr<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> *mut PageTable {
p2_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p2_page<S: NotGiantPageSize>(page: Page<S>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
recursive_index,
page.p4_index(),
page.p3_index(),
)
}
fn p1_ptr(page: Page<Size4KiB>, recursive_index: u9) -> *mut PageTable {
p1_page(page, recursive_index).start_address().as_mut_ptr()
}
fn p1_page(page: Page<Size4KiB>, recursive_index: u9) -> Page {
Page::from_page_table_indices(
recursive_index,
page.p4_index(),
page.p3_index(),
page.p2_index(),
)
}

@@ -166,6 +166,9 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
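
A hypothetical caller of the new `edit` helper, which exposes the set's page table to the closure for in-place modification (`remap_device_2mib` below relies on exactly this):

```rust
fn tweak<T: InactivePageTable>(ms: &mut MemorySet<T>) {
    ms.edit(|active_table| {
        // mutate mappings through `active_table`; the available operations
        // depend on T::Active's interface
        let _ = active_table;
    });
}
```
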
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}

@@ -93,7 +93,7 @@ set_stack:
zero_bss:
# load the start address and number of bytes in BSS section
ldr x1, =__bss_start
ldr x1, =sbss
ldr x2, =__bss_length
zero_bss_loop:

@@ -8,34 +8,44 @@ SECTIONS {
}
. = 0x100000; /* Load the kernel at this address; it is also the top of the kernel stack */
bootstacktop = .;
.text : {
stext = .;
*(.text.entry)
*(.text .text.* .gnu.linkonce.t*)
. = ALIGN(4K);
etext = .;
}
.rodata : {
srodata = .;
*(.rodata .rodata.* .gnu.linkonce.r*)
. = ALIGN(4K);
erodata = .;
}
.data : {
sdata = .;
*(.data .data.* .gnu.linkonce.d*)
. = ALIGN(4K);
edata = .;
}
.bss (NOLOAD) : {
. = ALIGN(32);
__bss_start = .;
sbss = .;
*(.bss .bss.*)
*(COMMON)
. = ALIGN(8);
__bss_end = .;
. = ALIGN(4K);
ebss = .;
}
/* end of the binary */
_end = ALIGN(8);
/* number of bytes in BSS section and complete binary */
__bss_length = (__bss_end - __bss_start);
__bss_length = (ebss - sbss);
__binary_length = (_end - _start);
/DISCARD/ : { *(.comment) *(.gnu*) *(.note*) *(.eh_frame*) }

@@ -1,13 +1,9 @@
//! Memory initialization for aarch64.
use bit_allocator::BitAlloc;
use ucore_memory::PAGE_SIZE;
use memory::{FRAME_ALLOCATOR, init_heap};
use memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Stack};
use super::atags::atags::Atags;
//use super::super::HEAP_ALLOCATOR;
use aarch64::{barrier, regs::*, addr::*};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use core::ops::Range;
use aarch64::{barrier, regs::*, addr::*, paging::PhysFrame as Frame};
/// Memory initialization.
pub fn init() {
@@ -25,14 +21,11 @@ pub fn init() {
init_mmu();
init_frame_allocator();
init_heap();
remap_the_kernel();
info!("memory: init end");
}
extern "C" {
static _end: u8;
}
fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
@@ -96,12 +89,36 @@ fn init_mmu() {
info!("mmu enabled");
}
fn remap_the_kernel() {
let (bottom, top) = (0, bootstacktop as usize);
let kstack = Stack {
top,
bottom,
};
// statically reserved backing storage handed to MemorySet::new_from_raw_space
static mut SPACE: [u8; 0x1000] = [0; 0x1000];
let mut ms = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
ms.push(MemoryArea::new_identity(bottom, top, MemoryAttr::default(), "kstack"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
// ensure the level 2 page table exists
ms.push(MemoryArea::new_identity(0x40000000, 0x40200000, MemoryAttr::default(), "arm_control"));
super::paging::remap_device_2mib(&mut ms, 0x3F000000, 0x40200000);
unsafe { ms.activate(); }
// the MemorySet now backs the live page tables, so it must never be dropped
use core::mem::forget;
forget(ms);
info!("kernel remap end");
}
/// Returns the (start address, end address) of the available memory on this
/// system if it can be determined. If it cannot, `None` is returned.
///
/// This function is expected to return `Some` under all normal circumstances.
fn memory_map() -> Option<(usize, usize)> {
let binary_end = unsafe { (&_end as *const u8) as u32 };
let binary_end = unsafe { _end as u32 };
let mut atags: Atags = Atags::get();
while let Some(atag) = atags.next() {
@@ -113,3 +130,16 @@ fn memory_map() -> Option<(usize, usize)> {
None
}
extern {
fn bootstacktop();
fn stext();
fn etext();
fn sdata();
fn edata();
fn srodata();
fn erodata();
fn sbss();
fn ebss();
fn _start();
fn _end();
}
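
These `fn` declarations only give the linker-script symbols typed addresses; none of them is ever called. Consumers always cast, as `remap_the_kernel` does above; for example:

```rust
fn text_range() -> (usize, usize) {
    // start and end of the kernel's .text section, from the linker script
    (stext as usize, etext as usize)
}
```
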

@@ -8,8 +8,7 @@ use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, ttbr0_el1_read, ttbr0_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB};
use aarch64::{regs::*};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PageRange, PhysFrame as Frame, Size4KiB, Size2MiB};
register_bitfields! {u64,
// AArch64 Reference Manual page 2150
@@ -61,6 +60,11 @@ register_bitfields! {u64,
]
}
mod mair {
pub const NORMAL: u64 = 0;
pub const DEVICE: u64 = 1;
}
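
These two indices must agree with the attribute fields programmed into MAIR_EL1 by `init_mmu`. A hedged sketch of a matching register value (the real write may go through the register crate's typed fields; the encodings below are the architectural ones for write-back normal memory and Device-nGnRE):

```rust
fn mair_value() -> u64 {
    const NORMAL_WB: u64 = 0xFF;    // Normal memory, inner/outer write-back
    const DEVICE_NGNRE: u64 = 0x04; // Device-nGnRE
    // MAIR_EL1 packs eight 8-bit attribute fields; AttrIndx selects one by index
    (NORMAL_WB << (8 * mair::NORMAL)) | (DEVICE_NGNRE << (8 * mair::DEVICE))
}
```
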
// needs 3 pages
pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame) {
let p4 = unsafe { &mut *(frame_lvl4.start_address().as_u64() as *mut Aarch64PageTable) };
@@ -70,11 +74,6 @@ pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame)
p3.zero();
p2.zero();
mod mair {
pub const NORMAL: u64 = 0;
pub const DEVICE: u64 = 1;
}
// Fill the rest of the LVL2 (2MiB) entries as block
// descriptors. Differentiate between normal and device mem.
const MMIO_BASE: u64 = 0x3F000000;
@@ -129,28 +128,37 @@ pub fn setup_page_table(frame_lvl4: Frame, frame_lvl3: Frame, frame_lvl2: Frame)
info!("setup init page table end");
}
pub trait PageExt {
fn of_addr(address: usize) -> Self;
fn range_of(begin: usize, end: usize) -> PageRange;
}
impl PageExt for Page {
fn of_addr(address: usize) -> Self {
Page::containing_address(VirtAddr::new(address as u64))
}
fn range_of(begin: usize, end: usize) -> PageRange {
Page::range(Page::of_addr(begin), Page::of_addr(end - 1) + 1)
}
}
pub trait FrameExt {
fn of_addr(address: usize) -> Self;
}
impl FrameExt for Frame {
fn of_addr(address: usize) -> Self {
Frame::containing_address(PhysAddr::new(address as u64))
}
}
/// Map the range [start_addr, end_addr) as device memory and insert it into the MemorySet.
pub fn remap_device_2mib(ms: &mut MemorySet<InactivePageTable0>, start_addr: usize, end_addr: usize) {
ms.edit(|active_table| {
let common = STAGE1_DESCRIPTOR::VALID::True
+ STAGE1_DESCRIPTOR::TYPE::Block
+ STAGE1_DESCRIPTOR::AP::RW_EL1
+ STAGE1_DESCRIPTOR::AF::True
+ STAGE1_DESCRIPTOR::XN::True;
let mem_attr = STAGE1_DESCRIPTOR::SH::OuterShareable + STAGE1_DESCRIPTOR::AttrIndx.val(mair::DEVICE);
type Page2MiB = Page<Size2MiB>;
for page in Page2MiB::range_of(start_addr, end_addr) {
let p2 = unsafe { &mut *active_table.0.p2_ptr(page) };
p2[page.p2_index()].entry = (common + mem_attr + STAGE1_DESCRIPTOR::LVL2_OUTPUT_ADDR_4KiB.val(page.start_address().as_u64() >> 21)).value;
}
// let p2 = unsafe { &mut *(0o777_777_000_000_0000 as *mut Aarch64PageTable) };
// for i in 0..512 {
//     if p2[i].flags().bits() != 0 {
//         info!("{:x?} {:x?} {:x?}", i, &p2[i] as *const _ as usize, p2[i]);
//     }
// }
// let p2 = unsafe { &mut *(0o777_777_000_001_0000 as *mut Aarch64PageTable) };
// for i in 0..512 {
//     if p2[i].flags().bits() != 0 {
//         info!("{:x?} {:x?} {:x?}", i, &p2[i] as *const _ as usize, p2[i]);
//     }
// }
});
}
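
This is the call site added in `remap_the_kernel` above: the Raspberry Pi peripheral window is covered with 2MiB device-memory blocks.

```rust
super::paging::remap_device_2mib(&mut ms, 0x3F000000, 0x40200000);
```
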
pub struct ActivePageTable(RecursivePageTable<'static>);
