Merge branch 'dev' into mipsel

Signed-off-by: Harry Chen <i@harrychen.xyz>
master
Harry Chen 6 years ago
commit 0614e2b1aa

@ -2,11 +2,12 @@
# It is not intended for manual editing.
[[package]]
name = "aarch64"
version = "2.5.0"
source = "git+https://github.com/rcore-os/aarch64#797c24f07f9d90542eb094530b6f63fe3ea7dded"
version = "2.6.1"
source = "git+https://github.com/rcore-os/aarch64#65d1453f11f3cc113247352dffa02d8dcdd34769"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -30,6 +31,11 @@ name = "bitflags"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cast"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cc"
version = "1.0.31"
@ -98,7 +104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "rcore-bootloader"
version = "0.1.0"
dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)",
"aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)",
"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -205,10 +211,11 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)" = "<none>"
"checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d"
"checksum fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7c6c16d316ccdac21a4dd648e314e76facbbaf316e83ca137d0857a9c07419d0"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"

@ -9,7 +9,7 @@ xmas-elf = "0.6.2"
fixedvec = "0.2.3"
[target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.5.0" }
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.6.1" }
bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0" }
[build-dependencies]

@ -1,6 +1,6 @@
use aarch64::addr::{VirtAddr, PhysAddr};
use aarch64::addr::{PhysAddr, VirtAddr};
use aarch64::paging::{memory_attribute::*, Page, PageTable, PageTableFlags as EF, PhysFrame};
use aarch64::paging::{Size4KiB, Size2MiB, Size1GiB};
use aarch64::paging::{Size1GiB, Size2MiB, Size4KiB};
use aarch64::{asm::*, barrier, regs::*};
use bcm2837::consts::RAW_IO_BASE;
use core::ptr;
@ -10,12 +10,22 @@ use xmas_elf::program::{ProgramHeader64, Type};
const PAGE_SIZE: usize = 4096;
const ALIGN_2MB: u64 = 0x200000;
const RECURSIVE_INDEX: usize = 0o777;
const KERNEL_OFFSET: u64 = 0xFFFF_0000_0000_0000;
const PHYSICAL_MEMORY_OFFSET: u64 = 0xFFFF_0000_0000_0000;
global_asm!(include_str!("boot.S"));
fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64) {
/// Convert physical address to virtual address
const fn phys_to_virt(paddr: u64) -> u64 {
PHYSICAL_MEMORY_OFFSET + paddr
}
/// Convert virtual address to physical address
const fn virt_to_phys(vaddr: u64) -> u64 {
vaddr - PHYSICAL_MEMORY_OFFSET
}
// TODO: set segments permission
fn create_page_table(start_paddr: usize, end_paddr: usize) {
#[repr(align(4096))]
struct PageData([u8; PAGE_SIZE]);
static mut PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]);
@ -34,13 +44,17 @@ fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::UXN;
// normal memory
for page in Page::<Size2MiB>::range_of(start_vaddr.as_u64(), end_vaddr.as_u64()) {
let paddr = PhysAddr::new(page.start_address().as_u64().wrapping_add(offset));
for frame in PhysFrame::<Size2MiB>::range_of(start_paddr as u64, end_paddr as u64) {
let paddr = frame.start_address();
let vaddr = VirtAddr::new(phys_to_virt(paddr.as_u64()));
let page = Page::<Size2MiB>::containing_address(vaddr);
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
}
// device memory
for page in Page::<Size2MiB>::range_of(RAW_IO_BASE as u64, 0x4000_0000) {
let paddr = PhysAddr::new(page.start_address().as_u64());
for frame in PhysFrame::<Size2MiB>::range_of(RAW_IO_BASE as u64, 0x4000_0000) {
let paddr = frame.start_address();
let vaddr = VirtAddr::new(phys_to_virt(paddr.as_u64()));
let page = Page::<Size2MiB>::containing_address(vaddr);
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
}
@ -48,8 +62,9 @@ fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64
p3[1].set_block::<Size1GiB>(PhysAddr::new(0x4000_0000), block_flags | EF::PXN, MairDevice::attr_value());
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
// the bootloader is still running at the lower virtual address range,
// so the TTBR0_EL1 also needs to be set.
ttbr_el1_write(0, frame_lvl4);
ttbr_el1_write(1, frame_lvl4);
tlb_invalidate_all();
@ -118,7 +133,7 @@ pub fn map_kernel(kernel_start: usize, segments: &FixedVec<ProgramHeader64>) {
unsafe {
let src = (kernel_start as u64 + offset) as *const u8;
let dst = virt_addr.wrapping_sub(KERNEL_OFFSET) as *mut u8;
let dst = virt_to_phys(virt_addr) as *mut u8;
ptr::copy(src, dst, file_size as usize);
ptr::write_bytes(dst.offset(file_size as isize), 0, (mem_size - file_size) as usize);
}
@ -131,6 +146,6 @@ pub fn map_kernel(kernel_start: usize, segments: &FixedVec<ProgramHeader64>) {
}
}
setup_temp_page_table(start_vaddr, end_vaddr, KERNEL_OFFSET.wrapping_neg());
create_page_table(0, RAW_IO_BASE);
enable_mmu();
}

@ -49,20 +49,6 @@ impl MemoryArea {
self.check_read_array(ptr, count)
}
}
/// Check the null-end C string is within the readable memory, and is valid.
/// If so, clone it to a String.
///
/// Unsafe: the page table must be active.
pub unsafe fn check_and_clone_cstr(&self, ptr: *const u8) -> Option<String> {
if ptr as usize >= self.end_addr {
return None;
}
let max_len = self.end_addr - ptr as usize;
(0..max_len)
.find(|&i| ptr.offset(i as isize).read() == 0)
.and_then(|len| core::str::from_utf8(core::slice::from_raw_parts(ptr, len)).ok())
.map(|s| String::from(s))
}
/// Test whether this area is (page) overlap with area [`start_addr`, `end_addr`]
pub fn is_overlap_with(&self, start_addr: VirtAddr, end_addr: VirtAddr) -> bool {
let p0 = Page::of_addr(self.start_addr);
@ -128,7 +114,8 @@ impl MemoryAttr {
/// A set of memory space with multiple memory areas with associated page table
/// NOTE: Don't remove align(64), or you will fail to run MIPS.
#[repr(align(64))]
/// Temporary solution for rv64
#[cfg_attr(not(target_arch = "riscv64"), repr(align(64)))]
pub struct MemorySet<T: PageTableExt> {
areas: Vec<MemoryArea>,
page_table: T,
@ -187,35 +174,6 @@ impl<T: PageTableExt> MemorySet<T> {
}
Err(VMError::InvalidPtr)
}
/// Check the null-end C string pointer array
/// Used for getting argv & envp
pub unsafe fn check_and_clone_cstr_array(
&self,
mut argv: *const *const u8,
) -> VMResult<Vec<String>> {
let mut args = Vec::new();
loop {
let cstr = *self.check_read_ptr(argv)?;
if cstr.is_null() {
break;
}
let arg = self.check_and_clone_cstr(cstr)?;
args.push(arg);
argv = argv.add(1);
}
Ok(args)
}
/// Check the null-end C string is within the readable memory, and is valid.
/// If so, clone it to a String.
///
/// Unsafe: the page table must be active.
pub unsafe fn check_and_clone_cstr(&self, ptr: *const u8) -> VMResult<String> {
self.areas
.iter()
.filter_map(|area| area.check_and_clone_cstr(ptr))
.next()
.ok_or(VMError::InvalidPtr)
}
/// Find a free area with hint address `addr_hint` and length `len`.
/// Return the start address of found free area.
/// Used for mmap.

11
kernel/Cargo.lock generated

@ -2,11 +2,12 @@
# It is not intended for manual editing.
[[package]]
name = "aarch64"
version = "2.5.0"
source = "git+https://github.com/rcore-os/aarch64#797c24f07f9d90542eb094530b6f63fe3ea7dded"
version = "2.6.1"
source = "git+https://github.com/rcore-os/aarch64#65d1453f11f3cc113247352dffa02d8dcdd34769"
dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -43,7 +44,7 @@ name = "bcm2837"
version = "1.0.0"
source = "git+https://github.com/rcore-os/bcm2837#b29a8db5504b7eaa6f8adf2c3ff916d1ffd15194"
dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)",
"aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -352,7 +353,7 @@ dependencies = [
name = "rcore"
version = "0.2.0"
dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)",
"aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)",
"bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -658,7 +659,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)" = "<none>"
"checksum array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72"
"checksum bare-metal 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a3caf393d93b2d453e80638d0674597020cef3382ada454faacd43d1a55a735a"

@ -79,7 +79,7 @@ pc-keyboard = "0.5"
riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
[target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.5.0" }
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.6.1" }
bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0", optional = true }
[target.'cfg(target_arch = "mips")'.dependencies]

@ -1,6 +1,5 @@
//! Raspberry PI 3 Model B/B+
use alloc::string::String;
use bcm2837::atags::Atags;
#[path = "../../../../drivers/gpu/fb.rs"]
@ -10,7 +9,7 @@ pub mod mailbox;
pub mod serial;
pub mod timer;
use fb::{ColorConfig, FramebufferInfo, FramebufferResult};
use fb::{ColorConfig, FramebufferResult};
pub const IO_REMAP_BASE: usize = bcm2837::consts::IO_BASE;
pub const IO_REMAP_END: usize = bcm2837::consts::KERNEL_OFFSET + 0x4000_1000;
@ -67,9 +66,8 @@ pub fn probe_fb_info(width: u32, height: u32, depth: u32) -> FramebufferResult {
))?;
}
use crate::arch::memory;
let paddr = info.bus_addr & !0xC0000000;
let vaddr = memory::ioremap(paddr as usize, info.screen_size as usize, "fb");
let vaddr = crate::memory::phys_to_virt(paddr as usize);
if vaddr == 0 {
Err(format!(
"cannot remap memory range [{:#x?}..{:#x?}]",

@ -1,7 +1,7 @@
pub const RECURSIVE_INDEX: usize = 0o777;
pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xFFFF_0000_0000_0000;
pub const KERNEL_PML4: usize = 0;
pub const PHYSICAL_MEMORY_OFFSET: usize = KERNEL_OFFSET - MEMORY_OFFSET;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024;
pub const MEMORY_OFFSET: usize = 0;
pub const USER_STACK_OFFSET: usize = 0x0000_8000_0000_0000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024;

@ -1,12 +1,13 @@
use aarch64::{asm, regs::*};
pub fn halt() {
unsafe { asm!("wfi" :::: "volatile") }
asm::wfi();
}
pub fn id() -> usize {
// TODO: cpu id
0
(MPIDR_EL1.get() & 3) as usize
}
pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
pub unsafe fn exit_in_qemu(_error_code: u8) -> ! {
unimplemented!()
}

@ -1,9 +1,7 @@
//! Memory initialization for aarch64.
use super::paging::MMIOType;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use aarch64::regs::*;
use crate::consts::MEMORY_OFFSET;
use crate::memory::{init_heap, virt_to_phys, FRAME_ALLOCATOR};
use log::*;
use rcore_memory::PAGE_SIZE;
@ -11,7 +9,6 @@ use rcore_memory::PAGE_SIZE;
pub fn init() {
init_frame_allocator();
init_heap();
remap_the_kernel();
info!("memory: init end");
}
@ -22,7 +19,7 @@ fn init_frame_allocator() {
let end = super::board::probe_memory()
.expect("failed to find memory map")
.1;
let start = (_end as u64 + PAGE_SIZE as u64).wrapping_sub(KERNEL_OFFSET as u64) as usize;
let start = virt_to_phys(_end as usize + PAGE_SIZE);
let mut ba = FRAME_ALLOCATOR.lock();
ba.insert(to_range(start, end));
info!("FrameAllocator init end");
@ -35,79 +32,7 @@ fn init_frame_allocator() {
}
}
static mut KERNEL_MEMORY_SET: Option<MemorySet> = None;
/// remap kernel page table after all initialization.
fn remap_the_kernel() {
let offset = -(KERNEL_OFFSET as isize);
let mut ms = MemorySet::new_bare();
ms.push(
stext as usize,
etext as usize,
MemoryAttr::default().execute().readonly(),
Linear::new(offset),
"text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"kstack",
);
use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(
IO_REMAP_BASE,
IO_REMAP_END,
MemoryAttr::default().mmio(MMIOType::Device as u8),
Linear::new(offset),
"io_remap",
);
info!("{:#x?}", ms);
unsafe { ms.get_page_table_mut().activate_as_kernel() }
unsafe { KERNEL_MEMORY_SET = Some(ms) }
info!("kernel remap end");
}
pub fn ioremap(paddr: usize, len: usize, name: &'static str) -> usize {
let offset = -(KERNEL_OFFSET as isize);
let vaddr = paddr.wrapping_add(KERNEL_OFFSET);
if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
ms.push(
vaddr,
vaddr + len,
MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8),
Linear::new(offset),
name,
);
return vaddr;
}
0
}
#[allow(dead_code)]
extern "C" {
fn stext();
fn etext();

@ -1,58 +1,79 @@
//! Page table implementations for aarch64.
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::paging::memory_attribute::*;
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB};
use aarch64::paging::{
Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable,
frame::PhysFrame as Frame,
mapper::{MappedPageTable, Mapper},
memory_attribute::*,
page_table::{PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator, Page as PageAllSizes, Size4KiB,
};
use aarch64::{PhysAddr, VirtAddr};
use log::*;
use rcore_memory::paging::*;
// Depends on kernel
use crate::consts::{KERNEL_OFFSET, KERNEL_PML4, RECURSIVE_INDEX};
use crate::memory::{active_table, alloc_frame, dealloc_frame};
pub struct ActivePageTable(RecursivePageTable);
type Page = PageAllSizes<Size4KiB>;
pub struct PageTableImpl {
page_table: MappedPageTable<'static, fn(Frame) -> *mut Aarch64PageTable>,
root_frame: Frame,
entry: PageEntry,
}
pub struct PageEntry(PageTableEntry);
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0
.map_to(
Page::of_addr(addr as u64),
Frame::of_addr(target as u64),
flags,
attr,
&mut FrameAllocatorForAarch64,
)
.unwrap()
.flush();
unsafe {
self.page_table
.map_to(
Page::of_addr(addr as u64),
Frame::of_addr(target as u64),
flags,
attr,
&mut FrameAllocatorForAarch64,
)
.unwrap()
.flush();
}
self.get_entry(addr).expect("fail to get entry")
}
fn unmap(&mut self, addr: usize) {
self.0.unmap(Page::of_addr(addr as u64)).unwrap().1.flush();
self.page_table
.unmap(Page::of_addr(addr as u64))
.unwrap()
.1
.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
// get p1 entry
let entry_addr =
((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39) | (vaddr & KERNEL_OFFSET);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
let page = Page::of_addr(vaddr as u64);
if let Ok(e) = self.page_table.get_entry_mut(page) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.entry = PageEntry(e, page);
Some(&mut self.entry as &mut Entry)
} else {
None
}
}
}
impl PageTableExt for ActivePageTable {
const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000;
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self
.page_table
.translate_page(Page::of_addr(addr as u64))
.unwrap();
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(RECURSIVE_INDEX as u16))
}
fn frame_to_page_table(frame: Frame) -> *mut Aarch64PageTable {
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
vaddr as *mut Aarch64PageTable
}
#[repr(u8)]
@ -65,8 +86,7 @@ pub enum MMIOType {
impl Entry for PageEntry {
fn update(&mut self) {
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
tlb_invalidate(addr);
tlb_invalidate(self.1.start_address());
}
fn present(&self) -> bool {
@ -100,7 +120,8 @@ impl Entry for PageEntry {
self.0.addr().as_u64() as usize
}
fn set_target(&mut self, target: usize) {
self.0.modify_addr(PhysAddr::new(target as u64));
self.0
.set_addr(PhysAddr::new(target as u64), self.0.flags(), self.0.attr());
}
fn writable_shared(&self) -> bool {
self.0.flags().contains(EF::WRITABLE_SHARED)
@ -163,7 +184,7 @@ impl Entry for PageEntry {
2 => MairNormalNonCacheable::attr_value(),
_ => return,
};
self.0.modify_attr(attr);
self.0.set_attr(attr);
}
}
@ -178,40 +199,45 @@ impl PageEntry {
self.0.flags().contains(EF::DIRTY)
}
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
unsafe { &mut *(self.0 as *mut _ as *mut EF) }
}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
p4_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Frame::of_addr(PageTableImpl::active_token() as u64);
let table = &mut *frame_to_page_table(frame);
PageTableImpl {
page_table: MappedPageTable::new(table, frame_to_page_table),
root_frame: frame,
entry: core::mem::uninitialized(),
}
}
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target as u64);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(
frame.clone(),
EF::default(),
MairNormal::attr_value(),
);
});
InactivePageTable0 { p4_frame: frame }
let table = unsafe { &mut *frame_to_page_table(frame) };
table.zero();
unsafe {
PageTableImpl {
page_table: MappedPageTable::new(table, frame_to_page_table),
root_frame: frame,
entry: core::mem::uninitialized(),
}
}
}
fn map_kernel(&mut self) {
// When a new InactivePageTable is created for the user MemorySet, it uses ttbr0 as the
// TTBR. The kernel TTBR ttbr1 will never change, so we needn't call map_kernel()
// kernel page table is based on TTBR1_EL1 and will never change.
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
self.root_frame.start_address().as_u64() as usize // as TTBR0_EL1
}
unsafe fn set_token(token: usize) {
@ -225,73 +251,25 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() {
tlb_invalidate_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = ttbr_el1_read(1);
if self.p4_frame == target {
return f(&mut active_table());
}
let target = target.start_address().as_u64() as usize;
active_table().with_temporary_map(
target,
|active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
let old_frame = ttbr_el1_read(0);
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(
self.p4_frame.clone(),
EF::default(),
MairNormal::attr_value(),
);
ttbr_el1_write(0, self.p4_frame.clone());
tlb_invalidate_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
ttbr_el1_write(0, old_frame);
tlb_invalidate_all();
ret
},
)
}
}
impl InactivePageTable0 {
/// Activate as kernel page table (TTBR0).
/// Used in `arch::memory::remap_the_kernel()`.
pub unsafe fn activate_as_kernel(&self) {
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(0, Frame::of_addr(0));
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
info!("PageTable dropping: {:?}", self.root_frame);
dealloc_frame(self.root_frame.start_address().as_u64() as usize);
}
}
struct FrameAllocatorForAarch64;
impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
fn alloc(&mut self) -> Option<Frame> {
unsafe impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
fn allocate_frame(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr as u64))
}
}
impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 {
fn dealloc(&mut self, frame: Frame) {
fn deallocate_frame(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u64() as usize);
}
}

@ -21,6 +21,9 @@ SECTIONS
*(.text.entry)
. = ALIGN(4K);
*(.text.ebase)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -266,6 +266,15 @@ fn page_fault(tf: &mut TrapFrame) {
if !tlb_valid {
if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.epc >= _copy_user_start as usize && tf.epc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.epc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf);
}
}
@ -274,6 +283,15 @@ fn page_fault(tf: &mut TrapFrame) {
}
Err(()) => {
if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.epc >= _copy_user_start as usize && tf.epc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.epc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf);
}
}

@ -17,6 +17,9 @@ SECTIONS
.text : {
stext = .;
*(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -17,6 +17,9 @@ SECTIONS
.text : {
stext = .;
*(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -17,6 +17,9 @@ SECTIONS
.text : {
stext = .;
*(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -22,6 +22,9 @@ SECTIONS
.text : {
stext = .;
*(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -17,6 +17,9 @@ SECTIONS
.text : {
stext = .;
*(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
. = ALIGN(4K);
etext = .;

@ -132,6 +132,15 @@ fn page_fault(tf: &mut TrapFrame) {
trace!("\nEXCEPTION: Page Fault @ {:#x}", addr);
if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.sepc >= _copy_user_start as usize && tf.sepc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.sepc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf);
}
}

@ -147,6 +147,17 @@ fn page_fault(tf: &mut TrapFrame) {
if crate::memory::handle_page_fault(addr) {
return;
}
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.rip >= _copy_user_start as usize && tf.rip < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.rip = crate::memory::read_user_fixup as usize;
return;
}
error!("\nEXCEPTION: Page Fault @ {:#x}, code: {:?}", addr, code);
error(tf);
}

@ -14,6 +14,9 @@ SECTIONS {
.text ALIGN(4K):
{
stext = .;
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*)
etext = .;
}

@ -184,6 +184,12 @@ impl<F: Font> Console<F> {
self.buf.delete(self.row, self.col);
}
}
b'\t' => {
self.write_byte(b' ');
while self.col % 8 != 0 {
self.write_byte(b' ');
}
}
b'\n' => self.new_line(),
b'\r' => self.col = 0,
b'\x1b' => self.parser.start_parse(),

@ -1,6 +1,8 @@
use alloc::alloc::{alloc_zeroed, dealloc, Layout};
pub use crate::arch::paging::PageTableImpl;
use crate::consts::PHYSICAL_MEMORY_OFFSET;
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt, virt_to_phys};
use isomorphic_drivers::provider;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
@ -11,16 +13,19 @@ impl provider::Provider for Provider {
const PAGE_SIZE: usize = PAGE_SIZE;
fn alloc_dma(size: usize) -> (usize, usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let vaddr = unsafe { alloc_zeroed(layout) } as usize;
let mut page_table = unsafe { PageTableImpl::active() };
let paddr = page_table.get_entry(vaddr).unwrap().target();
core::mem::forget(page_table);
// TODO: allocate contiguous pages
let mut paddr = alloc_frame().unwrap();
for i in 1..(size / PAGE_SIZE) {
let paddr_new = alloc_frame().unwrap();
assert_eq!(paddr - PAGE_SIZE, paddr_new);
paddr = paddr_new;
}
let vaddr = phys_to_virt(paddr);
(vaddr, paddr)
}
fn dealloc_dma(vaddr: usize, size: usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
unsafe { dealloc(vaddr as *mut u8, layout) }
let paddr = virt_to_phys(vaddr);
dealloc_frame(paddr);
}
}

@ -21,6 +21,7 @@ use alloc::boxed::Box;
use bitmap_allocator::BitAlloc;
use buddy_system_allocator::Heap;
use core::mem;
use core::mem::size_of;
use lazy_static::*;
use log::*;
pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr};
@ -145,13 +146,9 @@ pub fn init_heap() {
pub fn enlarge_heap(heap: &mut Heap) {
info!("Enlarging heap to avoid oom");
let mut page_table = unsafe { PageTableImpl::active() };
let mut addrs = [(0, 0); 32];
let mut addr_len = 0;
#[cfg(target_arch = "x86_64")]
let va_offset = KERNEL_OFFSET + 0xe0000000;
#[cfg(not(target_arch = "x86_64"))]
let va_offset = KERNEL_OFFSET + 0x00e00000;
let va_offset = PHYSICAL_MEMORY_OFFSET;
for i in 0..16384 {
let page = alloc_frame().unwrap();
let va = va_offset + page;
@ -167,13 +164,56 @@ pub fn enlarge_heap(heap: &mut Heap) {
addr_len += 1;
}
for (addr, len) in addrs[..addr_len].into_iter() {
for va in (*addr..(*addr + *len)).step_by(PAGE_SIZE) {
page_table.map(va, va - va_offset).update();
}
info!("Adding {:#X} {:#X} to heap", addr, len);
unsafe {
heap.init(*addr, *len);
}
}
core::mem::forget(page_table);
}
pub fn access_ok(from: usize, len: usize) -> bool {
from < PHYSICAL_MEMORY_OFFSET && (from + len) < PHYSICAL_MEMORY_OFFSET
}
#[naked]
pub unsafe extern "C" fn read_user_fixup() -> usize {
return 1;
}
#[no_mangle]
pub fn copy_from_user_u8(addr: *const u8) -> Option<u8> {
#[naked]
#[inline(never)]
#[link_section = ".text.copy_user"]
unsafe extern "C" fn read_user_u8(dst: *mut u8, src: *const u8) -> usize {
dst.copy_from_nonoverlapping(src, 1);
0
}
if !access_ok(addr as usize, size_of::<u8>()) {
return None;
}
let mut dst: u8 = 0;
match unsafe { read_user_u8((&mut dst) as *mut u8, addr) } {
0 => Some(dst),
_ => None,
}
}
#[no_mangle]
pub fn copy_from_user_usize(addr: *const usize) -> Option<usize> {
#[naked]
#[inline(never)]
#[link_section = ".text.copy_user"]
unsafe extern "C" fn read_user_usize(dst: *mut usize, src: *const usize) -> usize {
dst.copy_from_nonoverlapping(src, 1);
0
}
if !access_ok(addr as usize, size_of::<usize>()) {
return None;
}
let mut dst: usize = 0;
match unsafe { read_user_usize((&mut dst) as *mut usize, addr) } {
0 => Some(dst),
_ => None,
}
}

@ -272,7 +272,7 @@ impl Syscall<'_> {
mode: usize,
) -> SysResult {
let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
let flags = OpenFlags::from_bits_truncate(flags);
info!(
"openat: dir_fd: {}, path: {:?}, flags: {:?}, mode: {:#o}",
@ -336,7 +336,7 @@ impl Syscall<'_> {
) -> SysResult {
// TODO: check permissions based on uid/gid
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
let flags = AtFlags::from_bits_truncate(flags);
if !proc.pid.is_init() {
// we trust pid 0 process
@ -386,7 +386,7 @@ impl Syscall<'_> {
flags: usize,
) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
let stat_ref = unsafe { self.vm().check_write_ptr(stat_ptr)? };
let flags = AtFlags::from_bits_truncate(flags);
info!(
@ -417,7 +417,7 @@ impl Syscall<'_> {
len: usize,
) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
let slice = unsafe { self.vm().check_write_array(base, len)? };
info!(
"readlinkat: dirfd: {}, path: {:?}, base: {:?}, len: {}",
@ -463,7 +463,7 @@ impl Syscall<'_> {
pub fn sys_truncate(&mut self, path: *const u8, len: usize) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
info!("truncate: path: {:?}, len: {}", path, len);
proc.lookup_inode(&path)?.resize(len)?;
Ok(0)
@ -537,7 +537,7 @@ impl Syscall<'_> {
pub fn sys_chdir(&mut self, path: *const u8) -> SysResult {
let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
if !proc.pid.is_init() {
// we trust pid 0 process
info!("chdir: path: {:?}", path);
@ -590,8 +590,8 @@ impl Syscall<'_> {
newpath: *const u8,
) -> SysResult {
let proc = self.process();
let oldpath = unsafe { self.vm().check_and_clone_cstr(oldpath)? };
let newpath = unsafe { self.vm().check_and_clone_cstr(newpath)? };
let oldpath = unsafe { check_and_clone_cstr(oldpath)? };
let newpath = unsafe { check_and_clone_cstr(newpath)? };
info!(
"renameat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}",
olddirfd as isize, oldpath, newdirfd as isize, newpath
@ -611,7 +611,7 @@ impl Syscall<'_> {
pub fn sys_mkdirat(&mut self, dirfd: usize, path: *const u8, mode: usize) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
// TODO: check pathname
info!(
"mkdirat: dirfd: {}, path: {:?}, mode: {:#o}",
@ -629,7 +629,7 @@ impl Syscall<'_> {
pub fn sys_rmdir(&mut self, path: *const u8) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
info!("rmdir: path: {:?}", path);
let (dir_path, file_name) = split_path(&path);
@ -655,8 +655,8 @@ impl Syscall<'_> {
flags: usize,
) -> SysResult {
let proc = self.process();
let oldpath = unsafe { self.vm().check_and_clone_cstr(oldpath)? };
let newpath = unsafe { self.vm().check_and_clone_cstr(newpath)? };
let oldpath = unsafe { check_and_clone_cstr(oldpath)? };
let newpath = unsafe { check_and_clone_cstr(newpath)? };
let flags = AtFlags::from_bits_truncate(flags);
info!(
"linkat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}, flags: {:?}",
@ -676,7 +676,7 @@ impl Syscall<'_> {
pub fn sys_unlinkat(&mut self, dirfd: usize, path: *const u8, flags: usize) -> SysResult {
let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let path = unsafe { check_and_clone_cstr(path)? };
let flags = AtFlags::from_bits_truncate(flags);
info!(
"unlinkat: dirfd: {}, path: {:?}, flags: {:?}",

@ -10,7 +10,7 @@ use rcore_memory::VMError;
use crate::arch::cpu;
use crate::arch::interrupt::TrapFrame;
use crate::arch::syscall::*;
use crate::memory::MemorySet;
use crate::memory::{copy_from_user_u8, copy_from_user_usize, MemorySet};
use crate::process::*;
use crate::sync::{Condvar, MutexGuard, SpinNoIrq};
use crate::thread;
@ -556,3 +556,38 @@ pub fn spin_and_wait<T>(condvars: &[&Condvar], mut action: impl FnMut() -> Optio
Condvar::wait_any(&condvars);
}
}
pub fn check_and_clone_cstr(user: *const u8) -> Result<String, SysError> {
let mut buffer = Vec::new();
for i in 0.. {
let addr = unsafe { user.add(i) };
if let Some(data) = copy_from_user_u8(addr) {
if data > 0 {
buffer.push(data);
} else {
break;
}
} else {
return Err(SysError::EFAULT);
}
}
return String::from_utf8(buffer).map_err(|_| SysError::EFAULT);
}
pub fn check_and_clone_cstr_array(user: *const *const u8) -> Result<Vec<String>, SysError> {
let mut buffer = Vec::new();
for i in 0.. {
let addr = unsafe { user.add(i) };
if let Some(str_addr) = copy_from_user_usize(addr as *const usize) {
if str_addr > 0 {
let string = check_and_clone_cstr(str_addr as *const u8)?;
buffer.push(string);
} else {
break;
}
} else {
return Err(SysError::EFAULT);
}
}
return Ok(buffer);
}

@ -153,9 +153,9 @@ impl Syscall<'_> {
path, argv, envp
);
let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? };
let args = unsafe { self.vm().check_and_clone_cstr_array(argv)? };
let envs = unsafe { self.vm().check_and_clone_cstr_array(envp)? };
let path = unsafe { check_and_clone_cstr(path)? };
let args = unsafe { check_and_clone_cstr_array(argv)? };
let envs = unsafe { check_and_clone_cstr_array(envp)? };
if args.is_empty() {
error!("exec: args is null");

@ -1 +1 @@
Subproject commit 822cd0336855b8648424b10cebcaa0b7c944dbca
Subproject commit bf02e72b85784af3555c7abe6b985aefc215023e
Loading…
Cancel
Save