Merge pull request #51 from oscourse-tsinghua/dev

Dev
master
PanQL 6 years ago committed by GitHub
commit 7cf8b5d52f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -2,11 +2,12 @@
# It is not intended for manual editing. # It is not intended for manual editing.
[[package]] [[package]]
name = "aarch64" name = "aarch64"
version = "2.5.0" version = "2.6.1"
source = "git+https://github.com/rcore-os/aarch64#797c24f07f9d90542eb094530b6f63fe3ea7dded" source = "git+https://github.com/rcore-os/aarch64#65d1453f11f3cc113247352dffa02d8dcdd34769"
dependencies = [ dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -30,6 +31,11 @@ name = "bitflags"
version = "1.0.4" version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cast"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.0.31" version = "1.0.31"
@ -98,7 +104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
name = "rcore-bootloader" name = "rcore-bootloader"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)", "aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)", "bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)",
"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -205,10 +211,11 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata] [metadata]
"checksum aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)" = "<none>" "checksum aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)" = "<none>" "checksum bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)" = "<none>"
"checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56" "checksum bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed8765909f9009617974ab6b7d332625b320b33c326b1e9321382ef1999b5d56"
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" "checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d" "checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d"
"checksum fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7c6c16d316ccdac21a4dd648e314e76facbbaf316e83ca137d0857a9c07419d0" "checksum fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7c6c16d316ccdac21a4dd648e314e76facbbaf316e83ca137d0857a9c07419d0"
"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" "checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"

@ -9,7 +9,7 @@ xmas-elf = "0.6.2"
fixedvec = "0.2.3" fixedvec = "0.2.3"
[target.'cfg(target_arch = "aarch64")'.dependencies] [target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.5.0" } aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.6.1" }
bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0" } bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0" }
[build-dependencies] [build-dependencies]

@ -1,6 +1,6 @@
use aarch64::addr::{VirtAddr, PhysAddr}; use aarch64::addr::{PhysAddr, VirtAddr};
use aarch64::paging::{memory_attribute::*, Page, PageTable, PageTableFlags as EF, PhysFrame}; use aarch64::paging::{memory_attribute::*, Page, PageTable, PageTableFlags as EF, PhysFrame};
use aarch64::paging::{Size4KiB, Size2MiB, Size1GiB}; use aarch64::paging::{Size1GiB, Size2MiB, Size4KiB};
use aarch64::{asm::*, barrier, regs::*}; use aarch64::{asm::*, barrier, regs::*};
use bcm2837::consts::RAW_IO_BASE; use bcm2837::consts::RAW_IO_BASE;
use core::ptr; use core::ptr;
@ -10,12 +10,22 @@ use xmas_elf::program::{ProgramHeader64, Type};
const PAGE_SIZE: usize = 4096; const PAGE_SIZE: usize = 4096;
const ALIGN_2MB: u64 = 0x200000; const ALIGN_2MB: u64 = 0x200000;
const RECURSIVE_INDEX: usize = 0o777; const PHYSICAL_MEMORY_OFFSET: u64 = 0xFFFF_0000_0000_0000;
const KERNEL_OFFSET: u64 = 0xFFFF_0000_0000_0000;
global_asm!(include_str!("boot.S")); global_asm!(include_str!("boot.S"));
fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64) { /// Convert physical address to virtual address
const fn phys_to_virt(paddr: u64) -> u64 {
PHYSICAL_MEMORY_OFFSET + paddr
}
/// Convert virtual address to physical address
const fn virt_to_phys(vaddr: u64) -> u64 {
vaddr - PHYSICAL_MEMORY_OFFSET
}
// TODO: set segments permission
fn create_page_table(start_paddr: usize, end_paddr: usize) {
#[repr(align(4096))] #[repr(align(4096))]
struct PageData([u8; PAGE_SIZE]); struct PageData([u8; PAGE_SIZE]);
static mut PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]); static mut PAGE_TABLE_LVL4: PageData = PageData([0; PAGE_SIZE]);
@ -34,13 +44,17 @@ fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64
let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::UXN; let block_flags = EF::VALID | EF::AF | EF::WRITE | EF::UXN;
// normal memory // normal memory
for page in Page::<Size2MiB>::range_of(start_vaddr.as_u64(), end_vaddr.as_u64()) { for frame in PhysFrame::<Size2MiB>::range_of(start_paddr as u64, end_paddr as u64) {
let paddr = PhysAddr::new(page.start_address().as_u64().wrapping_add(offset)); let paddr = frame.start_address();
let vaddr = VirtAddr::new(phys_to_virt(paddr.as_u64()));
let page = Page::<Size2MiB>::containing_address(vaddr);
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value()); p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags, MairNormal::attr_value());
} }
// device memory // device memory
for page in Page::<Size2MiB>::range_of(RAW_IO_BASE as u64, 0x4000_0000) { for frame in PhysFrame::<Size2MiB>::range_of(RAW_IO_BASE as u64, 0x4000_0000) {
let paddr = PhysAddr::new(page.start_address().as_u64()); let paddr = frame.start_address();
let vaddr = VirtAddr::new(phys_to_virt(paddr.as_u64()));
let page = Page::<Size2MiB>::containing_address(vaddr);
p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value()); p2[page.p2_index()].set_block::<Size2MiB>(paddr, block_flags | EF::PXN, MairDevice::attr_value());
} }
@ -48,8 +62,9 @@ fn setup_temp_page_table(start_vaddr: VirtAddr, end_vaddr: VirtAddr, offset: u64
p3[1].set_block::<Size1GiB>(PhysAddr::new(0x4000_0000), block_flags | EF::PXN, MairDevice::attr_value()); p3[1].set_block::<Size1GiB>(PhysAddr::new(0x4000_0000), block_flags | EF::PXN, MairDevice::attr_value());
p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value()); p4[0].set_frame(frame_lvl3, EF::default(), MairNormal::attr_value());
p4[RECURSIVE_INDEX].set_frame(frame_lvl4, EF::default(), MairNormal::attr_value());
// the bootloader is still running at the lower virtual address range,
// so the TTBR0_EL1 also needs to be set.
ttbr_el1_write(0, frame_lvl4); ttbr_el1_write(0, frame_lvl4);
ttbr_el1_write(1, frame_lvl4); ttbr_el1_write(1, frame_lvl4);
tlb_invalidate_all(); tlb_invalidate_all();
@ -118,7 +133,7 @@ pub fn map_kernel(kernel_start: usize, segments: &FixedVec<ProgramHeader64>) {
unsafe { unsafe {
let src = (kernel_start as u64 + offset) as *const u8; let src = (kernel_start as u64 + offset) as *const u8;
let dst = virt_addr.wrapping_sub(KERNEL_OFFSET) as *mut u8; let dst = virt_to_phys(virt_addr) as *mut u8;
ptr::copy(src, dst, file_size as usize); ptr::copy(src, dst, file_size as usize);
ptr::write_bytes(dst.offset(file_size as isize), 0, (mem_size - file_size) as usize); ptr::write_bytes(dst.offset(file_size as isize), 0, (mem_size - file_size) as usize);
} }
@ -131,6 +146,6 @@ pub fn map_kernel(kernel_start: usize, segments: &FixedVec<ProgramHeader64>) {
} }
} }
setup_temp_page_table(start_vaddr, end_vaddr, KERNEL_OFFSET.wrapping_neg()); create_page_table(0, RAW_IO_BASE);
enable_mmu(); enable_mmu();
} }

@ -11,30 +11,19 @@ pub struct Page {
} }
impl Page { impl Page {
/* /// Returns the start address of the page.
** @brief get the virtual address of beginning of the page
** @retval VirtAddr the virtual address of beginning of the page
*/
pub fn start_address(&self) -> VirtAddr { pub fn start_address(&self) -> VirtAddr {
self.number * PAGE_SIZE self.number * PAGE_SIZE
} }
/*
** @brief get the page of a given virtual address /// Returns the page that contains the given virtual address.
** @param addr: VirtAddr the given virtual address
** @retval Page the page of the given virtual address
*/
pub fn of_addr(addr: VirtAddr) -> Self { pub fn of_addr(addr: VirtAddr) -> Self {
Page { Page {
number: addr / PAGE_SIZE, number: addr / PAGE_SIZE,
} }
} }
/* /// Returns a range of pages between address `begin` and `end`
** @brief get a pageRange between two virtual address
** @param begin: VirtAddr the virtual address of the beginning
** @param end: VirtAddr the virtual address of the end
** @retval PageRange the page of the given virtual address
*/
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange { pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
PageRange { PageRange {
start: Page::of_addr(begin), start: Page::of_addr(begin),
@ -79,45 +68,3 @@ impl Iterator for PageRange {
} }
} }
} }
/// frame for the swapmanager
#[derive(Debug, Copy, Clone, PartialOrd, Ord)]
#[repr(C)]
pub struct Frame {
/// the raw pointer for the frame's memory set's inactive page table
page_table: usize,
/// the virtual addr for the frame
virtaddr: VirtAddr,
/// the token for frame
token: usize,
}
impl Frame {
pub fn get_page_table(&self) -> usize {
self.page_table
}
pub fn get_virtaddr(&self) -> VirtAddr {
self.virtaddr
}
pub fn get_token(&self) -> usize {
self.token
}
pub fn new(pt: usize, addr: VirtAddr, pttoken: usize) -> Self {
Frame {
page_table: pt,
virtaddr: addr,
token: pttoken,
}
}
}
impl PartialEq for Frame {
fn eq(&self, other: &Frame) -> bool {
self.token == other.token && self.virtaddr == other.virtaddr
}
}
impl Eq for Frame {}

@ -11,7 +11,7 @@ pub mod cow;
pub mod memory_set; pub mod memory_set;
pub mod no_mmu; pub mod no_mmu;
pub mod paging; pub mod paging;
pub mod swap; //pub mod swap;
pub use crate::addr::*; pub use crate::addr::*;

@ -25,15 +25,13 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn clone_map( fn clone_map(
&self, &self,
pt: &mut PageTable, pt: &mut PageTable,
with: &Fn(&mut FnMut()), src_pt: &mut PageTable,
addr: VirtAddr, addr: VirtAddr,
attr: &MemoryAttr, attr: &MemoryAttr,
) { ) {
let data = Vec::from(pt.get_page_slice_mut(addr)); self.map(pt, addr, attr);
with(&mut || { let data = src_pt.get_page_slice_mut(addr);
self.map(pt, addr, attr); pt.get_page_slice_mut(addr).copy_from_slice(data);
pt.get_page_slice_mut(addr).copy_from_slice(&data);
});
} }
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool { fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

@ -30,24 +30,21 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn clone_map( fn clone_map(
&self, &self,
pt: &mut PageTable, pt: &mut PageTable,
with: &Fn(&mut FnMut()), src_pt: &mut PageTable,
addr: VirtAddr, addr: VirtAddr,
attr: &MemoryAttr, attr: &MemoryAttr,
) { ) {
let entry = pt.get_entry(addr).expect("failed to get entry"); let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() { if entry.present() {
// eager map and copy data // eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr)); let data = src_pt.get_page_slice_mut(addr);
with(&mut || { let target = self.allocator.alloc().expect("failed to alloc frame");
let target = self.allocator.alloc().expect("failed to alloc frame"); let entry = pt.map(addr, target);
let target_data = pt.get_page_slice_mut(addr); attr.apply(entry);
let entry = pt.map(addr, target); pt.get_page_slice_mut(addr).copy_from_slice(data);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
} else { } else {
// delay map // delay map
with(&mut || self.map(pt, addr, attr)); self.map(pt, addr, attr);
} }
} }

@ -39,24 +39,21 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
fn clone_map( fn clone_map(
&self, &self,
pt: &mut PageTable, pt: &mut PageTable,
with: &Fn(&mut FnMut()), src_pt: &mut PageTable,
addr: usize, addr: usize,
attr: &MemoryAttr, attr: &MemoryAttr,
) { ) {
let entry = pt.get_entry(addr).expect("failed to get entry"); let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() && !attr.readonly { if entry.present() && !attr.readonly {
// eager map and copy data // eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr)); let data = src_pt.get_page_slice_mut(addr);
with(&mut || { let target = self.allocator.alloc().expect("failed to alloc frame");
let target = self.allocator.alloc().expect("failed to alloc frame"); let entry = pt.map(addr, target);
let target_data = pt.get_page_slice_mut(addr); attr.apply(entry);
let entry = pt.map(addr, target); pt.get_page_slice_mut(addr).copy_from_slice(data);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
} else { } else {
// delay map // delay map
with(&mut || self.map(pt, addr, attr)); self.map(pt, addr, attr);
} }
} }
@ -69,16 +66,9 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
let frame = self.allocator.alloc().expect("failed to alloc frame"); let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame); entry.set_target(frame);
entry.set_present(true); entry.set_present(true);
let writable = entry.writable();
entry.set_writable(true);
entry.update(); entry.update();
self.fill_data(pt, addr); self.fill_data(pt, addr);
let entry = pt.get_entry(addr).expect("failed to get entry");
entry.set_writable(writable);
entry.update();
true true
} }
} }

@ -23,11 +23,11 @@ impl MemoryHandler for Linear {
fn clone_map( fn clone_map(
&self, &self,
pt: &mut PageTable, pt: &mut PageTable,
with: &Fn(&mut FnMut()), _src_pt: &mut PageTable,
addr: VirtAddr, addr: VirtAddr,
attr: &MemoryAttr, attr: &MemoryAttr,
) { ) {
with(&mut || self.map(pt, addr, attr)); self.map(pt, addr, attr);
} }
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool { fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

@ -5,20 +5,17 @@ pub trait MemoryHandler: Debug + Send + Sync + 'static {
fn box_clone(&self) -> Box<MemoryHandler>; fn box_clone(&self) -> Box<MemoryHandler>;
/// Map `addr` in the page table /// Map `addr` in the page table
/// Should set page flags here instead of in page_fault_handler /// Should set page flags here instead of in `page_fault_handler`
fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr); fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr);
/// Unmap `addr` in the page table /// Unmap `addr` in the page table
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr); fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
/// Clone map `addr` from one page table to another. /// Clone map `addr` from page table `src_pt` to `pt`.
/// `pt` is the current active page table.
/// `with` is the `InactivePageTable::with` function.
/// Call `with` then use `pt` as target page table inside.
fn clone_map( fn clone_map(
&self, &self,
pt: &mut PageTable, pt: &mut PageTable,
with: &Fn(&mut FnMut()), src_pt: &mut PageTable,
addr: VirtAddr, addr: VirtAddr,
attr: &MemoryAttr, attr: &MemoryAttr,
); );

@ -1,5 +1,4 @@
//! memory set, area //! Memory management structures
//! and the inactive page table
use alloc::{boxed::Box, string::String, vec::Vec}; use alloc::{boxed::Box, string::String, vec::Vec};
use core::fmt::{Debug, Error, Formatter}; use core::fmt::{Debug, Error, Formatter};
@ -13,8 +12,7 @@ use self::handler::MemoryHandler;
pub mod handler; pub mod handler;
/// a continuous memory space when the same attribute /// A continuous memory space when the same attribute
/// like `vma_struct` in ucore
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct MemoryArea { pub struct MemoryArea {
start_addr: VirtAddr, start_addr: VirtAddr,
@ -25,31 +23,7 @@ pub struct MemoryArea {
} }
impl MemoryArea { impl MemoryArea {
/* /// Test whether a virtual address is in the memory area
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
::core::slice::from_raw_parts(
self.start_addr as *const u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
::core::slice::from_raw_parts_mut(
self.start_addr as *mut u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief test whether a virtual address is in the memory area
** @param addr: VirtAddr the virtual address to test
** @retval bool whether the virtual address is in the memory area
*/
pub fn contains(&self, addr: VirtAddr) -> bool { pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr addr >= self.start_addr && addr < self.end_addr
} }
@ -75,20 +49,6 @@ impl MemoryArea {
self.check_read_array(ptr, count) self.check_read_array(ptr, count)
} }
} }
/// Check the null-end C string is within the readable memory, and is valid.
/// If so, clone it to a String.
///
/// Unsafe: the page table must be active.
pub unsafe fn check_and_clone_cstr(&self, ptr: *const u8) -> Option<String> {
if ptr as usize >= self.end_addr {
return None;
}
let max_len = self.end_addr - ptr as usize;
(0..max_len)
.find(|&i| ptr.offset(i as isize).read() == 0)
.and_then(|len| core::str::from_utf8(core::slice::from_raw_parts(ptr, len)).ok())
.map(|s| String::from(s))
}
/// Test whether this area is (page) overlap with area [`start_addr`, `end_addr`] /// Test whether this area is (page) overlap with area [`start_addr`, `end_addr`]
pub fn is_overlap_with(&self, start_addr: VirtAddr, end_addr: VirtAddr) -> bool { pub fn is_overlap_with(&self, start_addr: VirtAddr, end_addr: VirtAddr) -> bool {
let p0 = Page::of_addr(self.start_addr); let p0 = Page::of_addr(self.start_addr);
@ -121,42 +81,22 @@ pub struct MemoryAttr {
} }
impl MemoryAttr { impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self { pub fn user(mut self) -> Self {
self.user = true; self.user = true;
self self
} }
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self { pub fn readonly(mut self) -> Self {
self.readonly = true; self.readonly = true;
self self
} }
/*
** @brief unset the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn writable(mut self) -> Self { pub fn writable(mut self) -> Self {
self.readonly = false; self.readonly = false;
self self
} }
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self { pub fn execute(mut self) -> Self {
self.execute = true; self.execute = true;
self self
} }
/*
** @brief set the MMIO type
** @retval MemoryAttr the memory attribute itself
*/
pub fn mmio(mut self, value: u8) -> Self { pub fn mmio(mut self, value: u8) -> Self {
self.mmio = value; self.mmio = value;
self self
@ -172,26 +112,24 @@ impl MemoryAttr {
} }
} }
/// set of memory space with multiple memory area with associated page table and stack space /// A set of memory space with multiple memory areas with associated page table
/// like `mm_struct` in ucore
/// NOTE: Don't remove align(64), or you will fail to run MIPS. /// NOTE: Don't remove align(64), or you will fail to run MIPS.
#[repr(align(64))] /// Temporary solution for rv64
pub struct MemorySet<T: InactivePageTable> { #[cfg_attr(not(target_arch = "riscv64"), repr(align(64)))]
pub struct MemorySet<T: PageTableExt> {
areas: Vec<MemoryArea>, areas: Vec<MemoryArea>,
page_table: T, page_table: T,
} }
impl<T: InactivePageTable> MemorySet<T> { impl<T: PageTableExt> MemorySet<T> {
/* /// Create a new `MemorySet`
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
pub fn new() -> Self { pub fn new() -> Self {
MemorySet { MemorySet {
areas: Vec::new(), areas: Vec::new(),
page_table: T::new(), page_table: T::new(),
} }
} }
/// Create a new `MemorySet` for kernel remap
pub fn new_bare() -> Self { pub fn new_bare() -> Self {
MemorySet { MemorySet {
areas: Vec::new(), areas: Vec::new(),
@ -236,35 +174,6 @@ impl<T: InactivePageTable> MemorySet<T> {
} }
Err(VMError::InvalidPtr) Err(VMError::InvalidPtr)
} }
/// Check the null-end C string pointer array
/// Used for getting argv & envp
pub unsafe fn check_and_clone_cstr_array(
&self,
mut argv: *const *const u8,
) -> VMResult<Vec<String>> {
let mut args = Vec::new();
loop {
let cstr = *self.check_read_ptr(argv)?;
if cstr.is_null() {
break;
}
let arg = self.check_and_clone_cstr(cstr)?;
args.push(arg);
argv = argv.add(1);
}
Ok(args)
}
/// Check the null-end C string is within the readable memory, and is valid.
/// If so, clone it to a String.
///
/// Unsafe: the page table must be active.
pub unsafe fn check_and_clone_cstr(&self, ptr: *const u8) -> VMResult<String> {
self.areas
.iter()
.filter_map(|area| area.check_and_clone_cstr(ptr))
.next()
.ok_or(VMError::InvalidPtr)
}
/// Find a free area with hint address `addr_hint` and length `len`. /// Find a free area with hint address `addr_hint` and length `len`.
/// Return the start address of found free area. /// Return the start address of found free area.
/// Used for mmap. /// Used for mmap.
@ -284,11 +193,7 @@ impl<T: InactivePageTable> MemorySet<T> {
.find(|area| area.is_overlap_with(start_addr, end_addr)) .find(|area| area.is_overlap_with(start_addr, end_addr))
.is_none() .is_none()
} }
/* /// Add an area to this set
** @brief add the memory area to the memory set
** @param area: MemoryArea the memory area to add
** @retval none
*/
pub fn push( pub fn push(
&mut self, &mut self,
start_addr: VirtAddr, start_addr: VirtAddr,
@ -309,7 +214,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: Box::new(handler), handler: Box::new(handler),
name, name,
}; };
self.page_table.edit(|pt| area.map(pt)); area.map(&mut self.page_table);
// keep order by start address // keep order by start address
let idx = self let idx = self
.areas .areas
@ -321,28 +226,21 @@ impl<T: InactivePageTable> MemorySet<T> {
self.areas.insert(idx, area); self.areas.insert(idx, area);
} }
/* /// Remove the area `[start_addr, end_addr)` from `MemorySet`
** @brief remove the memory area from the memory set
** @param area: MemoryArea the memory area to remove
** @retval none
*/
pub fn pop(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) { pub fn pop(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area"); assert!(start_addr <= end_addr, "invalid memory area");
for i in 0..self.areas.len() { for i in 0..self.areas.len() {
if self.areas[i].start_addr == start_addr && self.areas[i].end_addr == end_addr { if self.areas[i].start_addr == start_addr && self.areas[i].end_addr == end_addr {
let area = self.areas.remove(i); let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt)); area.unmap(&mut self.page_table);
return; return;
} }
} }
panic!("no memory area found"); panic!("no memory area found");
} }
/* /// Remove the area `[start_addr, end_addr)` from `MemorySet`
** @brief remove the memory area from the memory set and split existed ones when necessary /// and split existed ones when necessary.
** @param area: MemoryArea the memory area to remove
** @retval none
*/
pub fn pop_with_split(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) { pub fn pop_with_split(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area"); assert!(start_addr <= end_addr, "invalid memory area");
let mut i = 0; let mut i = 0;
@ -351,7 +249,7 @@ impl<T: InactivePageTable> MemorySet<T> {
if self.areas[i].start_addr >= start_addr && self.areas[i].end_addr <= end_addr { if self.areas[i].start_addr >= start_addr && self.areas[i].end_addr <= end_addr {
// subset // subset
let area = self.areas.remove(i); let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt)); area.unmap(&mut self.page_table);
i -= 1; i -= 1;
} else if self.areas[i].start_addr >= start_addr } else if self.areas[i].start_addr >= start_addr
&& self.areas[i].start_addr < end_addr && self.areas[i].start_addr < end_addr
@ -365,7 +263,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: area.handler.box_clone(), handler: area.handler.box_clone(),
name: area.name, name: area.name,
}; };
self.page_table.edit(|pt| dead_area.unmap(pt)); dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea { let new_area = MemoryArea {
start_addr: end_addr, start_addr: end_addr,
end_addr: area.end_addr, end_addr: area.end_addr,
@ -379,13 +277,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// postfix // postfix
let area = self.areas.remove(i); let area = self.areas.remove(i);
let dead_area = MemoryArea { let dead_area = MemoryArea {
start_addr: start_addr, start_addr,
end_addr: area.end_addr, end_addr: area.end_addr,
attr: area.attr, attr: area.attr,
handler: area.handler.box_clone(), handler: area.handler.box_clone(),
name: area.name, name: area.name,
}; };
self.page_table.edit(|pt| dead_area.unmap(pt)); dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea { let new_area = MemoryArea {
start_addr: area.start_addr, start_addr: area.start_addr,
end_addr: start_addr, end_addr: start_addr,
@ -398,13 +296,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// superset // superset
let area = self.areas.remove(i); let area = self.areas.remove(i);
let dead_area = MemoryArea { let dead_area = MemoryArea {
start_addr: start_addr, start_addr,
end_addr: end_addr, end_addr,
attr: area.attr, attr: area.attr,
handler: area.handler.box_clone(), handler: area.handler.box_clone(),
name: area.name, name: area.name,
}; };
self.page_table.edit(|pt| dead_area.unmap(pt)); dead_area.unmap(&mut self.page_table);
let new_area_left = MemoryArea { let new_area_left = MemoryArea {
start_addr: area.start_addr, start_addr: area.start_addr,
end_addr: start_addr, end_addr: start_addr,
@ -428,74 +326,50 @@ impl<T: InactivePageTable> MemorySet<T> {
} }
} }
/* /// Get iterator of areas
** @brief get iterator of the memory area
** @retval impl Iterator<Item=&MemoryArea>
** the memory area iterator
*/
pub fn iter(&self) -> impl Iterator<Item = &MemoryArea> { pub fn iter(&self) -> impl Iterator<Item = &MemoryArea> {
self.areas.iter() self.areas.iter()
} }
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f); /// Execute function `f` with the associated page table
}
/*
** @brief execute function with the associated page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
pub unsafe fn with(&self, f: impl FnOnce()) { pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f); self.page_table.with(f);
} }
/* /// Activate the associated page table
** @brief activate the associated page table
** @retval none
*/
pub unsafe fn activate(&self) { pub unsafe fn activate(&self) {
self.page_table.activate(); self.page_table.activate();
} }
/*
** @brief get the token of the associated page table /// Get the token of the associated page table
** @retval usize the token of the inactive page table
*/
pub fn token(&self) -> usize { pub fn token(&self) -> usize {
self.page_table.token() self.page_table.token()
} }
/*
** @brief clear the memory set /// Clear and unmap all areas
** @retval none
*/
pub fn clear(&mut self) { pub fn clear(&mut self) {
let Self { let Self {
ref mut page_table, ref mut page_table,
ref mut areas, ref mut areas,
.. ..
} = self; } = self;
page_table.edit(|pt| { for area in areas.iter() {
for area in areas.iter() { area.unmap(page_table);
area.unmap(pt); }
}
});
areas.clear(); areas.clear();
} }
/// Get physical address of the page of given virtual `addr` /// Get physical address of the page of given virtual `addr`
pub fn translate(&mut self, addr: VirtAddr) -> Option<PhysAddr> { pub fn translate(&mut self, addr: VirtAddr) -> Option<PhysAddr> {
self.page_table.edit(|pt| { self.page_table.get_entry(addr).and_then(|entry| {
pt.get_entry(addr).and_then(|entry| { if entry.user() {
if entry.user() { Some(entry.target())
Some(entry.target()) } else {
} else { None
None }
}
})
}) })
} }
/* /// Get the reference of inner page table
** @brief get the mutable reference for the inactive page table
** @retval: &mut T the mutable reference of the inactive page table
*/
pub fn get_page_table_mut(&mut self) -> &mut T { pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table &mut self.page_table
} }
@ -503,32 +377,28 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn handle_page_fault(&mut self, addr: VirtAddr) -> bool { pub fn handle_page_fault(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr)); let area = self.areas.iter().find(|area| area.contains(addr));
match area { match area {
Some(area) => self Some(area) => area.handler.handle_page_fault(&mut self.page_table, addr),
.page_table
.edit(|pt| area.handler.handle_page_fault(pt, addr)),
None => false, None => false,
} }
} }
pub fn clone(&mut self) -> Self { pub fn clone(&mut self) -> Self {
let new_page_table = T::new(); let mut new_page_table = T::new();
let Self { let Self {
ref mut page_table, ref mut page_table,
ref areas, ref areas,
.. ..
} = self; } = self;
page_table.edit(|pt| { for area in areas.iter() {
for area in areas.iter() { for page in Page::range_of(area.start_addr, area.end_addr) {
for page in Page::range_of(area.start_addr, area.end_addr) { area.handler.clone_map(
area.handler.clone_map( &mut new_page_table,
pt, page_table,
&|f| unsafe { new_page_table.with(f) }, page.start_address(),
page.start_address(), &area.attr,
&area.attr, );
);
}
} }
}); }
MemorySet { MemorySet {
areas: areas.clone(), areas: areas.clone(),
page_table: new_page_table, page_table: new_page_table,
@ -536,13 +406,13 @@ impl<T: InactivePageTable> MemorySet<T> {
} }
} }
impl<T: InactivePageTable> Drop for MemorySet<T> { impl<T: PageTableExt> Drop for MemorySet<T> {
fn drop(&mut self) { fn drop(&mut self) {
self.clear(); self.clear();
} }
} }
impl<T: InactivePageTable> Debug for MemorySet<T> { impl<T: PageTableExt> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list().entries(self.areas.iter()).finish() f.debug_list().entries(self.areas.iter()).finish()
} }

@ -1,23 +0,0 @@
//! Helper functions
use super::*;
pub trait PageTableExt: PageTable {
// Take some special care here.
// TEMP_PAGE_ADDR mapping might be overwritten in the `f` below.
// So this should be really high in kernel space when necessary.
const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
fn with_temporary_map<T, D>(
&mut self,
target: PhysAddr,
f: impl FnOnce(&mut Self, &mut D) -> T,
) -> T {
self.map(Self::TEMP_PAGE_ADDR, target);
let data =
unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
let ret = f(self, data);
self.unmap(Self::TEMP_PAGE_ADDR);
ret
}
}

@ -2,12 +2,10 @@
//! //!
//! Implemented for every architecture, used by OS. //! Implemented for every architecture, used by OS.
pub use self::ext::*;
#[cfg(test)] #[cfg(test)]
pub use self::mock_page_table::MockPageTable; pub use self::mock_page_table::MockPageTable;
use super::*; use super::*;
mod ext;
#[cfg(test)] #[cfg(test)]
mod mock_page_table; mod mock_page_table;
@ -26,31 +24,18 @@ pub trait PageTable {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>; fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
/// Get a mutable reference of the content of a page of virtual address `addr` /// Get a mutable reference of the content of a page of virtual address `addr`
/// Used for testing with mock fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8];
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/// Read data from virtual address `addr` /// Read data from virtual address `addr`
/// Used for testing with mock /// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 { fn read(&mut self, _addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() } unimplemented!()
} }
/// Write data to virtual address `addr` /// Write data to virtual address `addr`
/// Used for testing with mock /// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) { fn write(&mut self, _addr: VirtAddr, _data: u8) {
unsafe { (addr as *mut u8).write(data) } unimplemented!()
}
/// When `vaddr` is not mapped, map it to `paddr`.
fn map_if_not_exists(&mut self, vaddr: VirtAddr, paddr: usize) -> bool {
if let Some(entry) = self.get_entry(vaddr) {
if entry.present() {
return false;
}
}
self.map(vaddr, paddr);
true
} }
} }
@ -99,13 +84,8 @@ pub trait Entry {
fn set_mmio(&mut self, value: u8); fn set_mmio(&mut self, value: u8);
} }
/// An inactive page table /// Extra methods of `PageTable` for non-trait-object usage
/// Note: InactivePageTable is not a PageTable pub trait PageTableExt: PageTable + Sized {
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
/// the active version of page table
type Active: PageTable;
/// Create a new page table with kernel memory mapped /// Create a new page table with kernel memory mapped
fn new() -> Self { fn new() -> Self {
let mut pt = Self::new_bare(); let mut pt = Self::new_bare();
@ -125,10 +105,6 @@ pub trait InactivePageTable: Sized {
fn active_token() -> usize; fn active_token() -> usize;
fn flush_tlb(); fn flush_tlb();
/// Make this page table editable
/// Set the recursive entry of current active page table to this
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/// Activate this page table /// Activate this page table
unsafe fn activate(&self) { unsafe fn activate(&self) {
let old_token = Self::active_token(); let old_token = Self::active_token();

37
kernel/Cargo.lock generated

@ -2,11 +2,12 @@
# It is not intended for manual editing. # It is not intended for manual editing.
[[package]] [[package]]
name = "aarch64" name = "aarch64"
version = "2.5.0" version = "2.6.1"
source = "git+https://github.com/rcore-os/aarch64#797c24f07f9d90542eb094530b6f63fe3ea7dded" source = "git+https://github.com/rcore-os/aarch64#65d1453f11f3cc113247352dffa02d8dcdd34769"
dependencies = [ dependencies = [
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -52,7 +53,7 @@ name = "bcm2837"
version = "1.0.0" version = "1.0.0"
source = "git+https://github.com/rcore-os/bcm2837#b29a8db5504b7eaa6f8adf2c3ff916d1ffd15194" source = "git+https://github.com/rcore-os/bcm2837#b29a8db5504b7eaa6f8adf2c3ff916d1ffd15194"
dependencies = [ dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)", "aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -87,7 +88,7 @@ source = "git+https://github.com/myrrlyn/bitvec.git#8ab20a3e33fe068fc3a4a05eda12
[[package]] [[package]]
name = "bootloader" name = "bootloader"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/rcore-os/bootloader?branch=vga#4ba8680bda6f355b80f11b685ee95a92599924e4" source = "git+https://github.com/rcore-os/bootloader?branch=linear#cc33d7d2d2d33f5adcbd0f596964ba99127b51af"
dependencies = [ dependencies = [
"apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)", "apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -370,14 +371,14 @@ dependencies = [
name = "rcore" name = "rcore"
version = "0.2.0" version = "0.2.0"
dependencies = [ dependencies = [
"aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)", "aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)",
"apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)", "apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)",
"bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)", "bcm2837 1.0.0 (git+https://github.com/rcore-os/bcm2837)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)", "bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)",
"bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)", "bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)",
"bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=vga)", "bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)",
"buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)", "cc 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)",
"console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -400,7 +401,7 @@ dependencies = [
"spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"uart_16550 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "uart_16550 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", "x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
@ -654,6 +655,19 @@ dependencies = [
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "x86_64"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]] [[package]]
name = "xmas-elf" name = "xmas-elf"
version = "0.6.2" version = "0.6.2"
@ -668,7 +682,7 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata] [metadata]
"checksum aarch64 2.5.0 (git+https://github.com/rcore-os/aarch64)" = "<none>" "checksum aarch64 2.6.1 (git+https://github.com/rcore-os/aarch64)" = "<none>"
"checksum apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)" = "<none>" "checksum apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)" = "<none>"
"checksum array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72" "checksum array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "23589ecb866b460d3a0f1278834750268c607e8e28a1b982c907219f3178cd72"
"checksum as-slice 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "293dac66b274fab06f95e7efb05ec439a6b70136081ea522d270bc351ae5bb27" "checksum as-slice 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "293dac66b274fab06f95e7efb05ec439a6b70136081ea522d270bc351ae5bb27"
@ -679,11 +693,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)" = "<none>" "checksum bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)" = "<none>"
"checksum bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)" = "<none>" "checksum bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)" = "<none>"
"checksum bitvec 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfadef5c4e2c2e64067b9ecc061179f12ac7ec65ba613b1f60f3972bbada1f5b" "checksum bitvec 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfadef5c4e2c2e64067b9ecc061179f12ac7ec65ba613b1f60f3972bbada1f5b"
"checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=vga)" = "<none>" "checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)" = "<none>"
"checksum buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "59da15ef556589ee78370281d75b67f2d69ed26465ec0e0f3961e2021502426f" "checksum buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "59da15ef556589ee78370281d75b67f2d69ed26465ec0e0f3961e2021502426f"
"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb" "checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427" "checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.36 (registry+https://github.com/rust-lang/crates.io-index)" = "a0c56216487bb80eec9c4516337b2588a4f2a2290d72a1416d930e4dcdb0c90d" "checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d"
"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4" "checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4"
"checksum console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f711b3d1d5c3f7ae7d6428901c0f3e5d5f5c800fcfac86bf0252e96373a2cec6" "checksum console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f711b3d1d5c3f7ae7d6428901c0f3e5d5f5c800fcfac86bf0252e96373a2cec6"
"checksum deque 0.3.2 (git+https://github.com/rcore-os/deque.git?branch=no_std)" = "<none>" "checksum deque 0.3.2 (git+https://github.com/rcore-os/deque.git?branch=no_std)" = "<none>"
@ -748,6 +762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
"checksum x86 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "841e1ca5a87068718a2a26f2473c6f93cf3b8119f9778fa0ae4b39b664d9e66a" "checksum x86 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "841e1ca5a87068718a2a26f2473c6f93cf3b8119f9778fa0ae4b39b664d9e66a"
"checksum x86_64 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f9258d7e2dd25008d69e8c9e9ee37865887a5e1e3d06a62f1cb3f6c209e6f177" "checksum x86_64 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f9258d7e2dd25008d69e8c9e9ee37865887a5e1e3d06a62f1cb3f6c209e6f177"
"checksum x86_64 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "bb8f09c32a991cc758ebcb9b7984f530095d32578a4e7b85db6ee1f0bbe4c9c6" "checksum x86_64 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1d0a8201f52d2c7b373c7243dcdfb27c0dd5012f221ef6a126f507ee82005204"
"checksum x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d69bf2d256c74df90fcc68aaf99862dd205310609e9d56247a5c82ead2f28a93"
"checksum xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58" "checksum xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58"
"checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5" "checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

@ -19,12 +19,10 @@ authors = [
] ]
[features] [features]
default = ["sv39"] default = []
# Page table sv39 or sv48 (for riscv64) board_u540 = ["link_user"]
sv39 = [] board_k210 = ["link_user"]
board_u540 = ["sv39", "link_user"] board_rocket_chip = ["link_user"]
board_k210 = ["sv39", "link_user"]
board_rocket_chip = ["sv39", "link_user"]
# (for aarch64 RaspberryPi3) # (for aarch64 RaspberryPi3)
nographic = [] nographic = []
board_raspi3 = ["bcm2837", "link_user"] board_raspi3 = ["bcm2837", "link_user"]
@ -70,9 +68,9 @@ rcore-fs = { git = "https://github.com/rcore-os/rcore-fs" }
rcore-fs-sfs = { git = "https://github.com/rcore-os/rcore-fs" } rcore-fs-sfs = { git = "https://github.com/rcore-os/rcore-fs" }
[target.'cfg(target_arch = "x86_64")'.dependencies] [target.'cfg(target_arch = "x86_64")'.dependencies]
bootloader = { git = "https://github.com/rcore-os/bootloader", branch="vga", features=["vga_320x200"] } bootloader = { git = "https://github.com/rcore-os/bootloader", branch = "linear" }
apic = { git = "https://github.com/rcore-os/apic-rs" } apic = { git = "https://github.com/rcore-os/apic-rs" }
x86_64 = "0.5" x86_64 = "0.6"
raw-cpuid = "6.0" raw-cpuid = "6.0"
uart_16550 = "0.2" uart_16550 = "0.2"
pc-keyboard = "0.5" pc-keyboard = "0.5"
@ -81,7 +79,7 @@ pc-keyboard = "0.5"
riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] } riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
[target.'cfg(target_arch = "aarch64")'.dependencies] [target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.5.0" } aarch64 = { git = "https://github.com/rcore-os/aarch64", version = "2.6.1" }
bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0", optional = true } bcm2837 = { git = "https://github.com/rcore-os/bcm2837", version = "1.0.0", optional = true }
[target.'cfg(target_arch = "mips")'.dependencies] [target.'cfg(target_arch = "mips")'.dependencies]

@ -1,6 +1,5 @@
//! Raspberry PI 3 Model B/B+ //! Raspberry PI 3 Model B/B+
use alloc::string::String;
use bcm2837::atags::Atags; use bcm2837::atags::Atags;
#[path = "../../../../drivers/gpu/fb.rs"] #[path = "../../../../drivers/gpu/fb.rs"]
@ -10,7 +9,7 @@ pub mod mailbox;
pub mod serial; pub mod serial;
pub mod timer; pub mod timer;
use fb::{ColorConfig, FramebufferInfo, FramebufferResult}; use fb::{ColorConfig, FramebufferResult};
pub const IO_REMAP_BASE: usize = bcm2837::consts::IO_BASE; pub const IO_REMAP_BASE: usize = bcm2837::consts::IO_BASE;
pub const IO_REMAP_END: usize = bcm2837::consts::KERNEL_OFFSET + 0x4000_1000; pub const IO_REMAP_END: usize = bcm2837::consts::KERNEL_OFFSET + 0x4000_1000;
@ -67,9 +66,8 @@ pub fn probe_fb_info(width: u32, height: u32, depth: u32) -> FramebufferResult {
))?; ))?;
} }
use crate::arch::memory;
let paddr = info.bus_addr & !0xC0000000; let paddr = info.bus_addr & !0xC0000000;
let vaddr = memory::ioremap(paddr as usize, info.screen_size as usize, "fb"); let vaddr = crate::memory::phys_to_virt(paddr as usize);
if vaddr == 0 { if vaddr == 0 {
Err(format!( Err(format!(
"cannot remap memory range [{:#x?}..{:#x?}]", "cannot remap memory range [{:#x?}..{:#x?}]",

@ -1,7 +1,7 @@
pub const RECURSIVE_INDEX: usize = 0o777; pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xFFFF_0000_0000_0000; pub const KERNEL_OFFSET: usize = 0xFFFF_0000_0000_0000;
pub const KERNEL_PML4: usize = 0; pub const PHYSICAL_MEMORY_OFFSET: usize = KERNEL_OFFSET - MEMORY_OFFSET;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024;
pub const MEMORY_OFFSET: usize = 0;
pub const USER_STACK_OFFSET: usize = 0x0000_8000_0000_0000 - USER_STACK_SIZE; pub const USER_STACK_OFFSET: usize = 0x0000_8000_0000_0000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024; pub const USER_STACK_SIZE: usize = 1 * 1024 * 1024;

@ -1,12 +1,13 @@
use aarch64::{asm, regs::*};
pub fn halt() { pub fn halt() {
unsafe { asm!("wfi" :::: "volatile") } asm::wfi();
} }
pub fn id() -> usize { pub fn id() -> usize {
// TODO: cpu id (MPIDR_EL1.get() & 3) as usize
0
} }
pub unsafe fn exit_in_qemu(error_code: u8) -> ! { pub unsafe fn exit_in_qemu(_error_code: u8) -> ! {
unimplemented!() unimplemented!()
} }

@ -1,9 +1,7 @@
//! Memory initialization for aarch64. //! Memory initialization for aarch64.
use super::paging::MMIOType; use crate::consts::MEMORY_OFFSET;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET}; use crate::memory::{init_heap, virt_to_phys, FRAME_ALLOCATOR};
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use aarch64::regs::*;
use log::*; use log::*;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
@ -11,7 +9,6 @@ use rcore_memory::PAGE_SIZE;
pub fn init() { pub fn init() {
init_frame_allocator(); init_frame_allocator();
init_heap(); init_heap();
remap_the_kernel();
info!("memory: init end"); info!("memory: init end");
} }
@ -22,7 +19,7 @@ fn init_frame_allocator() {
let end = super::board::probe_memory() let end = super::board::probe_memory()
.expect("failed to find memory map") .expect("failed to find memory map")
.1; .1;
let start = (_end as u64 + PAGE_SIZE as u64).wrapping_sub(KERNEL_OFFSET as u64) as usize; let start = virt_to_phys(_end as usize + PAGE_SIZE);
let mut ba = FRAME_ALLOCATOR.lock(); let mut ba = FRAME_ALLOCATOR.lock();
ba.insert(to_range(start, end)); ba.insert(to_range(start, end));
info!("FrameAllocator init end"); info!("FrameAllocator init end");
@ -35,79 +32,7 @@ fn init_frame_allocator() {
} }
} }
static mut KERNEL_MEMORY_SET: Option<MemorySet> = None; #[allow(dead_code)]
/// remap kernel page table after all initialization.
fn remap_the_kernel() {
let offset = -(KERNEL_OFFSET as isize);
let mut ms = MemorySet::new_bare();
ms.push(
stext as usize,
etext as usize,
MemoryAttr::default().execute().readonly(),
Linear::new(offset),
"text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"kstack",
);
use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(
IO_REMAP_BASE,
IO_REMAP_END,
MemoryAttr::default().mmio(MMIOType::Device as u8),
Linear::new(offset),
"io_remap",
);
info!("{:#x?}", ms);
unsafe { ms.get_page_table_mut().activate_as_kernel() }
unsafe { KERNEL_MEMORY_SET = Some(ms) }
info!("kernel remap end");
}
pub fn ioremap(paddr: usize, len: usize, name: &'static str) -> usize {
let offset = -(KERNEL_OFFSET as isize);
let vaddr = paddr.wrapping_add(KERNEL_OFFSET);
if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
ms.push(
vaddr,
vaddr + len,
MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8),
Linear::new(offset),
name,
);
return vaddr;
}
0
}
extern "C" { extern "C" {
fn stext(); fn stext();
fn etext(); fn etext();

@ -1,58 +1,79 @@
//! Page table implementations for aarch64. //! Page table implementations for aarch64.
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write}; use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::paging::memory_attribute::*;
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB};
use aarch64::paging::{ use aarch64::paging::{
Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable, frame::PhysFrame as Frame,
mapper::{MappedPageTable, Mapper},
memory_attribute::*,
page_table::{PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator, Page as PageAllSizes, Size4KiB,
}; };
use aarch64::{PhysAddr, VirtAddr}; use aarch64::{PhysAddr, VirtAddr};
use log::*; use log::*;
use rcore_memory::paging::*; use rcore_memory::paging::*;
// Depends on kernel
use crate::consts::{KERNEL_OFFSET, KERNEL_PML4, RECURSIVE_INDEX};
use crate::memory::{active_table, alloc_frame, dealloc_frame};
pub struct ActivePageTable(RecursivePageTable); type Page = PageAllSizes<Size4KiB>;
pub struct PageTableImpl {
page_table: MappedPageTable<'static, fn(Frame) -> *mut Aarch64PageTable>,
root_frame: Frame,
entry: PageEntry,
}
pub struct PageEntry(PageTableEntry); pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable { impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default(); let flags = EF::default();
let attr = MairNormal::attr_value(); let attr = MairNormal::attr_value();
self.0 unsafe {
.map_to( self.page_table
Page::of_addr(addr as u64), .map_to(
Frame::of_addr(target as u64), Page::of_addr(addr as u64),
flags, Frame::of_addr(target as u64),
attr, flags,
&mut FrameAllocatorForAarch64, attr,
) &mut FrameAllocatorForAarch64,
.unwrap() )
.flush(); .unwrap()
.flush();
}
self.get_entry(addr).expect("fail to get entry") self.get_entry(addr).expect("fail to get entry")
} }
fn unmap(&mut self, addr: usize) { fn unmap(&mut self, addr: usize) {
self.0.unmap(Page::of_addr(addr as u64)).unwrap().1.flush(); self.page_table
.unmap(Page::of_addr(addr as u64))
.unwrap()
.1
.flush();
} }
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
// get p1 entry let page = Page::of_addr(vaddr as u64);
let entry_addr = if let Ok(e) = self.page_table.get_entry_mut(page) {
((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39) | (vaddr & KERNEL_OFFSET); let e = unsafe { &mut *(e as *mut PageTableEntry) };
Some(unsafe { &mut *(entry_addr as *mut PageEntry) }) self.entry = PageEntry(e, page);
Some(&mut self.entry as &mut Entry)
} else {
None
}
} }
}
impl PageTableExt for ActivePageTable { fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000; let frame = self
.page_table
.translate_page(Page::of_addr(addr as u64))
.unwrap();
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
} }
impl ActivePageTable { fn frame_to_page_table(frame: Frame) -> *mut Aarch64PageTable {
pub unsafe fn new() -> Self { let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
ActivePageTable(RecursivePageTable::new(RECURSIVE_INDEX as u16)) vaddr as *mut Aarch64PageTable
}
} }
#[repr(u8)] #[repr(u8)]
@ -65,8 +86,7 @@ pub enum MMIOType {
impl Entry for PageEntry { impl Entry for PageEntry {
fn update(&mut self) { fn update(&mut self) {
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9); tlb_invalidate(self.1.start_address());
tlb_invalidate(addr);
} }
fn present(&self) -> bool { fn present(&self) -> bool {
@ -100,7 +120,8 @@ impl Entry for PageEntry {
self.0.addr().as_u64() as usize self.0.addr().as_u64() as usize
} }
fn set_target(&mut self, target: usize) { fn set_target(&mut self, target: usize) {
self.0.modify_addr(PhysAddr::new(target as u64)); self.0
.set_addr(PhysAddr::new(target as u64), self.0.flags(), self.0.attr());
} }
fn writable_shared(&self) -> bool { fn writable_shared(&self) -> bool {
self.0.flags().contains(EF::WRITABLE_SHARED) self.0.flags().contains(EF::WRITABLE_SHARED)
@ -163,7 +184,7 @@ impl Entry for PageEntry {
2 => MairNormalNonCacheable::attr_value(), 2 => MairNormalNonCacheable::attr_value(),
_ => return, _ => return,
}; };
self.0.modify_attr(attr); self.0.set_attr(attr);
} }
} }
@ -178,40 +199,45 @@ impl PageEntry {
self.0.flags().contains(EF::DIRTY) self.0.flags().contains(EF::DIRTY)
} }
fn as_flags(&mut self) -> &mut EF { fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) } unsafe { &mut *(self.0 as *mut _ as *mut EF) }
} }
} }
#[derive(Debug)] impl PageTableImpl {
pub struct InactivePageTable0 { /// Unsafely get the current active page table.
p4_frame: Frame, /// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Frame::of_addr(PageTableImpl::active_token() as u64);
let table = &mut *frame_to_page_table(frame);
PageTableImpl {
page_table: MappedPageTable::new(table, frame_to_page_table),
root_frame: frame,
entry: core::mem::uninitialized(),
}
}
} }
impl InactivePageTable for InactivePageTable0 { impl PageTableExt for PageTableImpl {
type Active = ActivePageTable;
fn new_bare() -> Self { fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame"); let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target as u64); let frame = Frame::of_addr(target as u64);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| { let table = unsafe { &mut *frame_to_page_table(frame) };
table.zero(); table.zero();
// set up recursive mapping for the table unsafe {
table[RECURSIVE_INDEX].set_frame( PageTableImpl {
frame.clone(), page_table: MappedPageTable::new(table, frame_to_page_table),
EF::default(), root_frame: frame,
MairNormal::attr_value(), entry: core::mem::uninitialized(),
); }
}); }
InactivePageTable0 { p4_frame: frame }
} }
fn map_kernel(&mut self) { fn map_kernel(&mut self) {
// When the new InactivePageTable is created for the user MemorySet, it's use ttbr0 as the // kernel page table is based on TTBR1_EL1 and will nerver change.
// TTBR. And the kernel TTBR ttbr1 will never changed, so we needn't call map_kernel()
} }
fn token(&self) -> usize { fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1 self.root_frame.start_address().as_u64() as usize // as TTBR0_EL1
} }
unsafe fn set_token(token: usize) { unsafe fn set_token(token: usize) {
@ -225,73 +251,25 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() { fn flush_tlb() {
tlb_invalidate_all(); tlb_invalidate_all();
} }
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = ttbr_el1_read(1);
if self.p4_frame == target {
return f(&mut active_table());
}
let target = target.start_address().as_u64() as usize;
active_table().with_temporary_map(
target,
|active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
let old_frame = ttbr_el1_read(0);
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(
self.p4_frame.clone(),
EF::default(),
MairNormal::attr_value(),
);
ttbr_el1_write(0, self.p4_frame.clone());
tlb_invalidate_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
ttbr_el1_write(0, old_frame);
tlb_invalidate_all();
ret
},
)
}
}
impl InactivePageTable0 {
/// Activate as kernel page table (TTBR0).
/// Used in `arch::memory::remap_the_kernel()`.
pub unsafe fn activate_as_kernel(&self) {
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(0, Frame::of_addr(0));
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
}
} }
impl Drop for InactivePageTable0 { impl Drop for PageTableImpl {
fn drop(&mut self) { fn drop(&mut self) {
info!("PageTable dropping: {:?}", self); info!("PageTable dropping: {:?}", self.root_frame);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize); dealloc_frame(self.root_frame.start_address().as_u64() as usize);
} }
} }
struct FrameAllocatorForAarch64; struct FrameAllocatorForAarch64;
impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 { unsafe impl FrameAllocator<Size4KiB> for FrameAllocatorForAarch64 {
fn alloc(&mut self) -> Option<Frame> { fn allocate_frame(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr as u64)) alloc_frame().map(|addr| Frame::of_addr(addr as u64))
} }
} }
impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 { impl FrameDeallocator<Size4KiB> for FrameAllocatorForAarch64 {
fn dealloc(&mut self, frame: Frame) { fn deallocate_frame(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u64() as usize); dealloc_frame(frame.start_address().as_u64() as usize);
} }
} }

@ -1,3 +1,3 @@
/// board specific constants /// board specific constants
pub const MEMORY_END: usize = 0x8080_0000; pub const MEMORY_END: usize = 0x8800_0000;
pub const KERNEL_HEAP_SIZE: usize = 0x0044_0000; pub const KERNEL_HEAP_SIZE: usize = 0x0044_0000;

@ -17,6 +17,9 @@ SECTIONS
*(.text.entry) *(.text.entry)
. = ALIGN(4K); . = ALIGN(4K);
*(.text.ebase) *(.text.ebase)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -2,12 +2,11 @@
/// ///
pub use super::board::consts::*; pub use super::board::consts::*;
pub const MEMORY_OFFSET: usize = 0x80000000;
pub const KERNEL_OFFSET: usize = 0x80100000; pub const KERNEL_OFFSET: usize = 0x80100000;
pub const PHYSICAL_MEMORY_OFFSET: usize = 0x80000000;
pub const MEMORY_OFFSET: usize = 0x8000_0000;
pub const USER_STACK_OFFSET: usize = 0x70000000 - USER_STACK_SIZE; pub const USER_STACK_OFFSET: usize = 0x70000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000; pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = 0x70000000 - USER_STACK_SIZE;
pub const MAX_DTB_SIZE: usize = 0x2000; pub const MAX_DTB_SIZE: usize = 0x2000;

@ -232,7 +232,7 @@ fn reserved_inst(tf: &mut TrapFrame) -> bool {
let tls = unsafe { *(_cur_tls as *const usize) }; let tls = unsafe { *(_cur_tls as *const usize) };
set_trapframe_register(rt, tls, tf); set_trapframe_register(rt, tls, tf);
info!("Read TLS by rdhdr {:x} to register {:?}", tls, rt); debug!("Read TLS by rdhdr {:x} to register {:?}", tls, rt);
return true; return true;
} else { } else {
return false; return false;
@ -266,6 +266,15 @@ fn page_fault(tf: &mut TrapFrame) {
if !tlb_valid { if !tlb_valid {
if !crate::memory::handle_page_fault(addr) { if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.epc >= _copy_user_start as usize && tf.epc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.epc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf); crate::trap::error(tf);
} }
} }
@ -274,6 +283,15 @@ fn page_fault(tf: &mut TrapFrame) {
} }
Err(()) => { Err(()) => {
if !crate::memory::handle_page_fault(addr) { if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.epc >= _copy_user_start as usize && tf.epc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.epc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf); crate::trap::error(tf);
} }
} }

@ -1,29 +1,32 @@
// Depends on kernel // Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame}; use crate::memory::{alloc_frame, dealloc_frame};
use log::*;
use mips::addr::*; use mips::addr::*;
use mips::paging::{FrameAllocator, FrameDeallocator};
use mips::paging::{ use mips::paging::{
Mapper, PageTable as MIPSPageTable, PageTableEntry, PageTableFlags as EF, TwoLevelPageTable, FrameAllocator, FrameDeallocator, Mapper, PageTable as MIPSPageTable, PageTableEntry,
PageTableFlags as EF, TwoLevelPageTable,
}; };
use mips::tlb::*; use mips::tlb::TLBEntry;
use rcore_memory::paging::*; use rcore_memory::paging::*;
pub struct ActivePageTable(usize, PageEntry); pub struct PageTableImpl {
page_table: TwoLevelPageTable<'static>,
root_frame: Frame,
entry: PageEntry,
}
/// PageTableEntry: the contents of this entry. /// PageTableEntry: the contents of this entry.
/// Page: this entry is the pte of page `Page`. /// Page: this entry is the pte of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page); pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable { impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// map the 4K `page` to the 4K `frame` with `flags` // map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::WRITABLE | EF::CACHEABLE; let flags = EF::VALID | EF::WRITABLE | EF::CACHEABLE;
let page = Page::of_addr(VirtAddr::new(addr)); let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
// we may need frame allocator to alloc frame for new page table(first/second) // we may need frame allocator to alloc frame for new page table(first/second)
self.get_table() self.page_table
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv) .map_to(page, frame, flags, &mut FrameAllocatorForMips)
.unwrap() .unwrap()
.flush(); .flush();
self.get_entry(addr).expect("fail to get entry") self.get_entry(addr).expect("fail to get entry")
@ -31,20 +34,29 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) { fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr)); let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.get_table().unmap(page).unwrap(); let (_, flush) = self.page_table.unmap(page).unwrap();
flush.flush(); flush.flush();
} }
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr)); let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.get_table().ref_entry(page.clone()) { if let Ok(e) = self.page_table.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) }; let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page); self.entry = PageEntry(e, page);
Some(&mut self.1 as &mut Entry) Some(&mut self.entry as &mut Entry)
} else { } else {
None None
} }
} }
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self
.page_table
.translate_page(Page::of_addr(VirtAddr::new(addr)))
.unwrap();
let vaddr = frame.to_kernel_unmapped().as_usize();
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
} }
extern "C" { extern "C" {
@ -54,7 +66,7 @@ extern "C" {
pub fn set_root_page_table_ptr(ptr: usize) { pub fn set_root_page_table_ptr(ptr: usize) {
unsafe { unsafe {
clear_all_tlb(); TLBEntry::clear_all();
*(_root_page_table_ptr as *mut usize) = ptr; *(_root_page_table_ptr as *mut usize) = ptr;
} }
} }
@ -67,35 +79,11 @@ pub fn root_page_table_buffer() -> &'static mut MIPSPageTable {
unsafe { &mut *(_root_page_table_ptr as *mut MIPSPageTable) } unsafe { &mut *(_root_page_table_ptr as *mut MIPSPageTable) }
} }
impl PageTableExt for ActivePageTable {}
static mut __page_table_with_mode: bool = false;
/// The virtual address of root page table
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(get_root_page_table_ptr(), ::core::mem::uninitialized())
}
unsafe fn get_raw_table(&mut self) -> *mut MIPSPageTable {
if __page_table_with_mode {
get_root_page_table_ptr() as *mut MIPSPageTable
} else {
self.0 as *mut MIPSPageTable
}
}
fn get_table(&mut self) -> TwoLevelPageTable<'static> {
unsafe { TwoLevelPageTable::new(&mut *self.get_raw_table()) }
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs /// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry { impl Entry for PageEntry {
fn update(&mut self) { fn update(&mut self) {
unsafe { unsafe {
clear_all_tlb(); TLBEntry::clear_all();
} }
} }
fn accessed(&self) -> bool { fn accessed(&self) -> bool {
@ -158,22 +146,33 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {} fn set_mmio(&mut self, _value: u8) {}
} }
#[derive(Debug)] impl PageTableImpl {
pub struct InactivePageTable0 { /// Unsafely get the current active page table.
root_frame: Frame, /// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Frame::of_addr(PhysAddr::new(get_root_page_table_ptr() & 0x7fffffff));
let table = root_page_table_buffer();
PageTableImpl {
page_table: TwoLevelPageTable::new(table),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
} }
impl InactivePageTable for InactivePageTable0 { impl PageTableExt for PageTableImpl {
type Active = ActivePageTable;
fn new_bare() -> Self { fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame"); let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
let table = unsafe { &mut *(target as *mut MIPSPageTable) }; let table = unsafe { &mut *(target as *mut MIPSPageTable) };
table.zero(); table.zero();
InactivePageTable0 { root_frame: frame }
PageTableImpl {
page_table: TwoLevelPageTable::new(table),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
} }
fn map_kernel(&mut self) { fn map_kernel(&mut self) {
@ -194,75 +193,26 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() { fn flush_tlb() {
unsafe { unsafe {
clear_all_tlb(); TLBEntry::clear_all();
}
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
unsafe {
clear_all_tlb();
}
debug!(
"edit table {:x?} -> {:x?}",
Self::active_token(),
self.token()
);
let mut active = unsafe { ActivePageTable(self.token(), ::core::mem::uninitialized()) };
let ret = f(&mut active);
debug!("finish table");
unsafe {
clear_all_tlb();
}
ret
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_token = Self::active_token();
let new_token = self.token();
let old_mode = unsafe { __page_table_with_mode };
unsafe {
__page_table_with_mode = true;
}
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
} }
let ret = f();
debug!("switch table {:x?} -> {:x?}", new_token, old_token);
if old_token != new_token {
Self::set_token(old_token);
Self::flush_tlb();
}
unsafe {
__page_table_with_mode = old_mode;
}
ret
} }
} }
impl Drop for InactivePageTable0 { impl Drop for PageTableImpl {
fn drop(&mut self) { fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize()); dealloc_frame(self.root_frame.start_address().as_usize());
} }
} }
struct FrameAllocatorForRiscv; struct FrameAllocatorForMips;
impl FrameAllocator for FrameAllocatorForRiscv { impl FrameAllocator for FrameAllocatorForMips {
fn alloc(&mut self) -> Option<Frame> { fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(PhysAddr::new(addr))) alloc_frame().map(|addr| Frame::of_addr(PhysAddr::new(addr)))
} }
} }
impl FrameDeallocator for FrameAllocatorForRiscv { impl FrameDeallocator for FrameAllocatorForMips {
fn dealloc(&mut self, frame: Frame) { fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_usize()); dealloc_frame(frame.start_address().as_usize());
} }

@ -17,6 +17,9 @@ SECTIONS
.text : { .text : {
stext = .; stext = .;
*(.text.entry) *(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -100,6 +100,13 @@
compatible = "xlnx,xps-intc-1.00.a"; compatible = "xlnx,xps-intc-1.00.a";
reg = <0x61200000 0x1000>; reg = <0x61200000 0x1000>;
interrupt-parent = <&L10>; interrupt-parent = <&L10>;
interrupts = <0>;
};
router: router@64A00000 {
compatible = "rcore,router";
reg = <0x64A00000 0x1000>;
interrupt-parent = <&L10>;
interrupts = <1>; interrupts = <1>;
}; };
}; };

@ -17,6 +17,9 @@ SECTIONS
.text : { .text : {
stext = .; stext = .;
*(.text.entry) *(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -1,19 +1,20 @@
use super::consts::KERNEL_OFFSET; use crate::memory::phys_to_virt;
/// Mask all external interrupt except serial. /// Mask all external interrupt except serial.
pub unsafe fn init_external_interrupt() { pub unsafe fn init_external_interrupt() {
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u64 = (KERNEL_OFFSET + 0x0C00_2080) as *mut u64; const HART0_S_MODE_INTERRUPT_ENABLES: *mut u64 = phys_to_virt(0x0C00_2080) as *mut u64;
// enable all external interrupts
HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(0xf); HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(0xf);
// mask interrupts first // mask interrupts first
const AXI_INTC_IER: *mut u32 = (KERNEL_OFFSET + 0x1900_0008) as *mut u32; const AXI_INTC_IER: *mut u32 = phys_to_virt(0x6120_0008) as *mut u32;
AXI_INTC_IER.write_volatile(0x0); AXI_INTC_IER.write_volatile(0x0);
// acknowledge all interrupts // acknowledge all interrupts
const AXI_INTC_IAR: *mut u32 = (KERNEL_OFFSET + 0x1900_000C) as *mut u32; const AXI_INTC_IAR: *mut u32 = phys_to_virt(0x6120_000C) as *mut u32;
AXI_INTC_IAR.write_volatile(0xffffffff); AXI_INTC_IAR.write_volatile(0xffffffff);
const AXI_INTC_MER: *mut u32 = (KERNEL_OFFSET + 0x1900_001C) as *mut u32; const AXI_INTC_MER: *mut u32 = phys_to_virt(0x6120_001C) as *mut u32;
// Hardware Interrupt enable | Enable irq output // Hardware Interrupt enable | Enable irq output
AXI_INTC_MER.write_volatile(0b11); AXI_INTC_MER.write_volatile(0b11);
@ -24,20 +25,19 @@ pub unsafe fn init_external_interrupt() {
/// Claim and complete external interrupt by reading and writing to /// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register. /// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() { pub unsafe fn handle_external_interrupt() {
const HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = const HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = phys_to_virt(0x0C20_1004) as *mut u32;
(KERNEL_OFFSET + 0x0C20_1004) as *mut u32;
// claim // claim
let source = HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile(); let source = HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile();
// complete // complete
HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.write_volatile(source); HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.write_volatile(source);
// acknowledge all interrupts // acknowledge all interrupts
const AXI_INTC_IAR: *mut u32 = (KERNEL_OFFSET + 0x1900_000C) as *mut u32; const AXI_INTC_IAR: *mut u32 = phys_to_virt(0x6120_000C) as *mut u32;
AXI_INTC_IAR.write_volatile(0xffffffff); AXI_INTC_IAR.write_volatile(0xffffffff);
} }
pub unsafe fn enable_serial_interrupt() { pub unsafe fn enable_serial_interrupt() {
const SERIAL_BASE: *mut u32 = (KERNEL_OFFSET + 0x18000000) as *mut u32; const SERIAL_BASE: *mut u32 = phys_to_virt(0x60000000) as *mut u32;
const UART_CTRL_REG: usize = 3; const UART_CTRL_REG: usize = 3;
// Intr enable | rx reset | tx reset // Intr enable | rx reset | tx reset
const UART_IE: u32 = 0x13; const UART_IE: u32 = 0x13;

@ -17,6 +17,9 @@ SECTIONS
.text : { .text : {
stext = .; stext = .;
*(.text.entry) *(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -1,8 +1,9 @@
use super::consts::KERNEL_OFFSET; use super::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
/// Mask all external interrupt except serial. /// Mask all external interrupt except serial.
pub unsafe fn init_external_interrupt() { pub unsafe fn init_external_interrupt() {
const HART1_S_MODE_INTERRUPT_ENABLES: *mut u64 = (KERNEL_OFFSET + 0x0C00_2100) as *mut u64; const HART1_S_MODE_INTERRUPT_ENABLES: *mut u64 = phys_to_virt(0x0C00_2100) as *mut u64;
const SERIAL: u64 = 4; const SERIAL: u64 = 4;
HART1_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL); HART1_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL);
} }
@ -10,8 +11,7 @@ pub unsafe fn init_external_interrupt() {
/// Claim and complete external interrupt by reading and writing to /// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register. /// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() { pub unsafe fn handle_external_interrupt() {
const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = phys_to_virt(0x0C20_2004) as *mut u32;
(KERNEL_OFFSET + 0x0C20_2004) as *mut u32;
// claim // claim
let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile(); let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile();
// complete // complete
@ -19,7 +19,7 @@ pub unsafe fn handle_external_interrupt() {
} }
pub unsafe fn enable_serial_interrupt() { pub unsafe fn enable_serial_interrupt() {
const SERIAL_BASE: *mut u8 = (KERNEL_OFFSET + 0x10010000) as *mut u8; const SERIAL_BASE: *mut u8 = phys_to_virt(0x10010000) as *mut u8;
const UART_REG_IE: usize = 4; const UART_REG_IE: usize = 4;
const UART_RXWM: u8 = 0x2; const UART_RXWM: u8 = 0x2;
SERIAL_BASE.add(UART_REG_IE).write_volatile(UART_RXWM); SERIAL_BASE.add(UART_REG_IE).write_volatile(UART_RXWM);

@ -1,4 +1,4 @@
use super::consts::KERNEL_OFFSET; use crate::memory::phys_to_virt;
/// Mask all external interrupt except serial. /// Mask all external interrupt except serial.
pub unsafe fn init_external_interrupt() { pub unsafe fn init_external_interrupt() {
@ -6,13 +6,13 @@ pub unsafe fn init_external_interrupt() {
// riscv-pk (bbl) enables all S-Mode IRQs (ref: machine/minit.c) // riscv-pk (bbl) enables all S-Mode IRQs (ref: machine/minit.c)
// OpenSBI v0.3 disables all IRQs (ref: platform/common/irqchip/plic.c) // OpenSBI v0.3 disables all IRQs (ref: platform/common/irqchip/plic.c)
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u32 = (KERNEL_OFFSET + 0x0C00_2080) as *mut u32; const HART0_S_MODE_INTERRUPT_ENABLES: *mut u32 = phys_to_virt(0x0C00_2080) as *mut u32;
const SERIAL: u32 = 0xa; const SERIAL: u32 = 0xa;
HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL); HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL);
} }
pub unsafe fn enable_serial_interrupt() { pub unsafe fn enable_serial_interrupt() {
const UART16550: *mut u8 = (KERNEL_OFFSET + 0x10000000) as *mut u8; const UART16550: *mut u8 = phys_to_virt(0x10000000) as *mut u8;
UART16550.add(4).write_volatile(0x0B); UART16550.add(4).write_volatile(0x0B);
UART16550.add(1).write_volatile(0x01); UART16550.add(1).write_volatile(0x01);
} }

@ -12,11 +12,27 @@ _start:
lui sp, %hi(bootstack) lui sp, %hi(bootstack)
add sp, sp, t0 add sp, sp, t0
# 2. enable paging # 2. paging
# satp = (1 << 31) | PPN(boot_page_table_sv32) # satp = (1 << 31) | PPN(boot_page_table_sv32)
lui t0, %hi(boot_page_table_sv32) lui t0, %hi(boot_page_table_sv32)
li t1, 0xc0000000 - 0x80000000 li t1, 0xc0000000 - 0x80000000
sub t0, t0, t1 sub t0, t0, t1
# 2.1 linear mapping (0xc0000000 -> 0x80000000)
li t2, 768*4
li t4, 0x400 << 10
li t5, 4
add t1, t0, t2
li t6, 1024*4
add t6, t0, t6
li t3, (0x80000 << 10) | 0xcf # VRWXAD
loop:
sw t3, 0(t1)
add t3, t3, t4
add t1, t1, t5
bne t1, t6, loop
# 2.2 enable paging
srli t0, t0, 12 srli t0, t0, 12
li t1, 1 << 31 li t1, 1 << 31
or t0, t0, t1 or t0, t0, t1
@ -41,15 +57,11 @@ bootstacktop:
boot_page_table_sv32: boot_page_table_sv32:
# NOTE: assume kernel image < 16M # NOTE: assume kernel image < 16M
# 0x80000000 -> 0x80000000 (4M * 4) # 0x80000000 -> 0x80000000 (4M * 4)
# 0xc0000000 -> 0x80000000 (4M * 4) # 0xc0000000 -> 0x80000000 (mapped in code above)
.zero 4 * 512 .zero 4 * 512
.word (0x80000 << 10) | 0xcf # VRWXAD .word (0x80000 << 10) | 0xcf # VRWXAD
.word (0x80400 << 10) | 0xcf # VRWXAD .word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD .word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD .word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252 .zero 4 * 252
.word (0x80000 << 10) | 0xcf # VRWXAD .zero 4 * 256
.word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252

@ -22,6 +22,9 @@ SECTIONS
.text : { .text : {
stext = .; stext = .;
*(.text.entry) *(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -17,6 +17,9 @@ SECTIONS
.text : { .text : {
stext = .; stext = .;
*(.text.entry) *(.text.entry)
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
. = ALIGN(4K); . = ALIGN(4K);
etext = .; etext = .;

@ -1,27 +1,14 @@
// Physical address available on THINPAD: // Linear mapping
// [0x80000000, 0x80800000]
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
pub const RECURSIVE_INDEX: usize = 0x3fd; pub const PHYSICAL_MEMORY_OFFSET: usize = 0x4000_0000;
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0o774; pub const PHYSICAL_MEMORY_OFFSET: usize = 0xFFFF_FFFF_4000_0000;
// Under riscv64, upon booting, paging is enabled by bbl and
// root_table[0777] maps to p3_table,
// and p3_table[0777] maps to gigapage 8000_0000H,
// so 0xFFFF_FFFF_8000_0000 maps to 0x8000_0000
// root_table[0774] points to root_table itself as page table
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
pub const KERNEL_OFFSET: usize = 0xC000_0000; pub const KERNEL_OFFSET: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
pub const KERNEL_OFFSET: usize = 0xFFFF_FFFF_C000_0000; pub const KERNEL_OFFSET: usize = 0xFFFF_FFFF_C000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_P2_INDEX: usize = (KERNEL_OFFSET >> 12 >> 10) & 0x3ff;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_P4_INDEX: usize = (KERNEL_OFFSET >> 12 >> 9 >> 9 >> 9) & 0o777;
#[cfg(feature = "board_k210")] #[cfg(feature = "board_k210")]
pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000; pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
#[cfg(not(feature = "board_k210"))] #[cfg(not(feature = "board_k210"))]
@ -35,7 +22,7 @@ pub const MEMORY_END: usize = 0x8060_0000;
pub const MEMORY_END: usize = 0x8800_0000; pub const MEMORY_END: usize = 0x8800_0000;
// FIXME: rv64 `sh` and `ls` will crash if stack top > 0x80000000 ??? // FIXME: rv64 `sh` and `ls` will crash if stack top > 0x80000000 ???
pub const USER_STACK_OFFSET: usize = 0x80000000 - USER_STACK_SIZE; pub const USER_STACK_OFFSET: usize = 0x40000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000; pub const USER_STACK_SIZE: usize = 0x10000;
pub const MAX_DTB_SIZE: usize = 0x2000; pub const MAX_DTB_SIZE: usize = 0x2000;

@ -132,6 +132,15 @@ fn page_fault(tf: &mut TrapFrame) {
trace!("\nEXCEPTION: Page Fault @ {:#x}", addr); trace!("\nEXCEPTION: Page Fault @ {:#x}", addr);
if !crate::memory::handle_page_fault(addr) { if !crate::memory::handle_page_fault(addr) {
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.sepc >= _copy_user_start as usize && tf.sepc < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.sepc = crate::memory::read_user_fixup as usize;
return;
}
crate::trap::error(tf); crate::trap::error(tf);
} }
} }

@ -17,10 +17,6 @@ pub fn init(dtb: usize) {
// initialize heap and Frame allocator // initialize heap and Frame allocator
init_frame_allocator(); init_frame_allocator();
init_heap(); init_heap();
// remap the kernel use 4K page
unsafe {
super::paging::setup_recursive_mapping();
}
remap_the_kernel(dtb); remap_the_kernel(dtb);
} }
@ -54,112 +50,8 @@ fn init_frame_allocator() {
} }
/// Remap the kernel memory address with 4K page recorded in p1 page table /// Remap the kernel memory address with 4K page recorded in p1 page table
fn remap_the_kernel(dtb: usize) { fn remap_the_kernel(_dtb: usize) {
let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize); let mut ms = MemorySet::new();
let mut ms = MemorySet::new_bare();
ms.push(
stext as usize,
etext as usize,
MemoryAttr::default().execute().readonly(),
Linear::new(offset),
"text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"stack",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
// dtb on rocket chip is embedded into kernel
#[cfg(not(feature = "board_rocket_chip"))]
ms.push(
dtb,
dtb + super::consts::MAX_DTB_SIZE,
MemoryAttr::default().readonly(),
Linear::new(offset),
"dts",
);
// map PLIC for HiFiveU & VirtIO
let offset = -(KERNEL_OFFSET as isize);
ms.push(
KERNEL_OFFSET + 0x0C00_2000,
KERNEL_OFFSET + 0x0C00_2000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic0",
);
ms.push(
KERNEL_OFFSET + 0x0C20_2000,
KERNEL_OFFSET + 0x0C20_2000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic1",
);
// map UART for HiFiveU
ms.push(
KERNEL_OFFSET + 0x10010000,
KERNEL_OFFSET + 0x10010000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"uart",
);
// map UART for VirtIO
ms.push(
KERNEL_OFFSET + 0x10000000,
KERNEL_OFFSET + 0x10000000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"uart16550",
);
// map PLIC for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x0C20_1000,
KERNEL_OFFSET + 0x0C20_1000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic2",
);
// map UART for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x18000000,
KERNEL_OFFSET + 0x18000000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(-(KERNEL_OFFSET as isize + 0x18000000 - 0x60000000)),
"uartlite",
);
// map AXI INTC for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x19000000,
KERNEL_OFFSET + 0x19000000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(-(KERNEL_OFFSET as isize + 0x19000000 - 0x61200000)),
"axi_intc",
);
unsafe { unsafe {
ms.activate(); ms.activate();
} }

@ -20,13 +20,13 @@ mod sbi;
pub mod syscall; pub mod syscall;
pub mod timer; pub mod timer;
use self::consts::{KERNEL_OFFSET, MEMORY_OFFSET}; use crate::memory::phys_to_virt;
use core::sync::atomic::{AtomicBool, Ordering}; use core::sync::atomic::{AtomicBool, Ordering};
use log::*; use log::*;
#[no_mangle] #[no_mangle]
pub extern "C" fn rust_main(hartid: usize, device_tree_paddr: usize) -> ! { pub extern "C" fn rust_main(hartid: usize, device_tree_paddr: usize) -> ! {
let mut device_tree_vaddr = device_tree_paddr - MEMORY_OFFSET + KERNEL_OFFSET; let mut device_tree_vaddr = phys_to_virt(device_tree_paddr);
unsafe { unsafe {
cpu::set_cpu_id(hartid); cpu::set_cpu_id(hartid);

@ -1,10 +1,5 @@
use crate::consts::RECURSIVE_INDEX; use crate::consts::PHYSICAL_MEMORY_OFFSET;
// Depends on kernel use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use log::*; use log::*;
use rcore_memory::paging::*; use rcore_memory::paging::*;
use riscv::addr::*; use riscv::addr::*;
@ -16,22 +11,29 @@ use riscv::paging::{
}; };
use riscv::register::satp; use riscv::register::satp;
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry); #[cfg(target_arch = "riscv32")]
type TopLevelPageTable<'a> = riscv::paging::Rv32PageTable<'a>;
#[cfg(target_arch = "riscv64")]
type TopLevelPageTable<'a> = riscv::paging::Rv39PageTable<'a>;
pub struct PageTableImpl {
page_table: TopLevelPageTable<'static>,
root_frame: Frame,
entry: PageEntry,
}
/// PageTableEntry: the contents of this entry. /// PageTableEntry: the contents of this entry.
/// Page: this entry is the pte of page `Page`. /// Page: this entry is the pte of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page); pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable { impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging:Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags` // map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::READABLE | EF::WRITABLE; let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
let page = Page::of_addr(VirtAddr::new(addr)); let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second) // we may need frame allocator to alloc frame for new page table(first/second)
self.0 self.page_table
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv) .map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap() .unwrap()
.flush(); .flush();
@ -40,59 +42,28 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) { fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr)); let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.0.unmap(page).unwrap(); let (_, flush) = self.page_table.unmap(page).unwrap();
flush.flush(); flush.flush();
} }
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr)); let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.0.ref_entry(page.clone()) { if let Ok(e) = self.page_table.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) }; let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page); self.entry = PageEntry(e, page);
Some(&mut self.1 as &mut Entry) Some(&mut self.entry as &mut Entry)
} else { } else {
None None
} }
} }
}
impl PageTableExt for ActivePageTable {}
/// The virtual address of root page table fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
#[cfg(target_arch = "riscv32")] let frame = self
const ROOT_PAGE_TABLE: *mut RvPageTable = .page_table
((RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable; .translate_page(Page::of_addr(VirtAddr::new(addr)))
#[cfg(all(target_arch = "riscv64", feature = "sv39"))] .unwrap();
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000) let vaddr = frame.start_address().as_usize() + PHYSICAL_MEMORY_OFFSET;
| (0o777 << 12 << 9 << 9 << 9) unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", not(feature = "sv39")))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (RECURSIVE_INDEX << 12 << 9 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
impl ActivePageTable {
#[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self {
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::uninitialized(),
)
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn new() -> Self {
#[cfg(feature = "sv39")]
let type_ = PageTableType::Sv39;
#[cfg(not(feature = "sv39"))]
let type_ = PageTableType::Sv48;
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(),
::core::mem::uninitialized(),
)
} }
} }
@ -173,72 +144,67 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {} fn set_mmio(&mut self, _value: u8) {}
} }
#[derive(Debug)] impl PageTableImpl {
pub struct InactivePageTable0 { /// Unsafely get the current active page table.
root_frame: Frame, /// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
#[cfg(target_arch = "riscv32")]
let mask = 0x7fffffff;
#[cfg(target_arch = "riscv64")]
let mask = 0x0fffffff_ffffffff;
let frame = Frame::of_ppn(PageTableImpl::active_token() & mask);
let table = frame.as_kernel_mut(PHYSICAL_MEMORY_OFFSET);
PageTableImpl {
page_table: TopLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
} }
impl InactivePageTable for InactivePageTable0 { impl PageTableExt for PageTableImpl {
type Active = ActivePageTable;
fn new_bare() -> Self { fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame"); let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { root_frame: frame }
}
#[cfg(target_arch = "riscv32")] let table = unsafe { &mut *(phys_to_virt(target) as *mut RvPageTable) };
fn map_kernel(&mut self) { table.zero();
let table = unsafe { &mut *ROOT_PAGE_TABLE };
extern "C" {
fn start();
fn end();
}
let mut entrys: [PageTableEntry; 256] = unsafe { core::mem::uninitialized() };
let entry_start = start as usize >> 22;
let entry_end = (end as usize >> 22) + 1;
let entry_count = entry_end - entry_start;
for i in 0..entry_count {
entrys[i] = table[entry_start + i];
}
self.edit(|_| { PageTableImpl {
// NOTE: 'table' now refers to new page table page_table: TopLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
for i in 0..entry_count { root_frame: frame,
table[entry_start + i] = entrys[i]; entry: unsafe { core::mem::uninitialized() },
} }
});
} }
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) { fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE }; info!("mapping kernel linear mapping");
let e1 = table[KERNEL_P4_INDEX]; let table = unsafe {
assert!(!e1.is_unused()); &mut *(phys_to_virt(self.root_frame.start_address().as_usize()) as *mut RvPageTable)
};
self.edit(|_| { #[cfg(target_arch = "riscv32")]
table[KERNEL_P4_INDEX] = e1; for i in 256..1024 {
}); let flags =
EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE | EF::ACCESSED | EF::DIRTY;
let frame = Frame::of_addr(PhysAddr::new((i << 22) - PHYSICAL_MEMORY_OFFSET));
table[i].set(frame, flags);
}
#[cfg(target_arch = "riscv64")]
for i in 509..512 {
let flags =
EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE | EF::ACCESSED | EF::DIRTY;
let frame = Frame::of_addr(PhysAddr::new(
(0xFFFFFF80_00000000 + (i << 30)) - PHYSICAL_MEMORY_OFFSET,
));
table[i].set(frame, flags);
}
} }
#[cfg(target_arch = "riscv32")]
fn token(&self) -> usize { fn token(&self) -> usize {
self.root_frame.number() | (1 << 31) // as satp #[cfg(target_arch = "riscv32")]
} return self.root_frame.number() | (1 << 31);
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
fn token(&self) -> usize { return self.root_frame.number() | (8 << 60);
use bit_field::BitField;
let mut satp = self.root_frame.number();
satp.set_bits(44..60, 0); // AS is 0
#[cfg(feature = "sv39")]
satp.set_bits(60..64, satp::Mode::Sv39 as usize);
#[cfg(not(feature = "sv39"))]
satp.set_bits(60..64, satp::Mode::Sv48 as usize);
satp
} }
unsafe fn set_token(token: usize) { unsafe fn set_token(token: usize) {
@ -246,7 +212,11 @@ impl InactivePageTable for InactivePageTable0 {
} }
fn active_token() -> usize { fn active_token() -> usize {
satp::read().bits() let mut token: usize = 0;
unsafe {
asm!("csrr $0, satp" : "=r"(token) ::: "volatile");
}
token
} }
fn flush_tlb() { fn flush_tlb() {
@ -254,33 +224,9 @@ impl InactivePageTable for InactivePageTable0 {
sfence_vma_all(); sfence_vma_all();
} }
} }
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
unsafe {
sfence_vma_all();
}
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
unsafe {
sfence_vma_all();
}
ret
})
}
} }
impl Drop for InactivePageTable0 { impl Drop for PageTableImpl {
fn drop(&mut self) { fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize()); dealloc_frame(self.root_frame.start_address().as_usize());
} }
@ -299,13 +245,3 @@ impl FrameDeallocator for FrameAllocatorForRiscv {
dealloc_frame(frame.start_address().as_usize()); dealloc_frame(frame.start_address().as_usize());
} }
} }
pub unsafe fn setup_recursive_mapping() {
let frame = satp::read().frame();
let root_page_table = unsafe { &mut *(frame.start_address().as_usize() as *mut RvPageTable) };
root_page_table.set_recursive(RECURSIVE_INDEX, frame);
unsafe {
sfence_vma_all();
}
info!("setup recursive mapping end");
}

@ -1,6 +1,7 @@
pub const MEMORY_OFFSET: usize = 0; pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000; pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
pub const PHYSICAL_MEMORY_OFFSET: usize = 0xfffffc00_00000000;
pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE; pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux

@ -1,3 +1,4 @@
use crate::memory::phys_to_virt;
use apic::{LocalApic, XApic}; use apic::{LocalApic, XApic};
use raw_cpuid::CpuId; use raw_cpuid::CpuId;
use x86_64::registers::control::{Cr0, Cr0Flags}; use x86_64::registers::control::{Cr0, Cr0Flags};
@ -21,12 +22,12 @@ pub fn id() -> usize {
} }
pub fn send_ipi(cpu_id: usize) { pub fn send_ipi(cpu_id: usize) {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) }; let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.send_ipi(cpu_id as u8, 0x30); // TODO: Find a IPI trap num lapic.send_ipi(cpu_id as u8, 0x30); // TODO: Find a IPI trap num
} }
pub fn init() { pub fn init() {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) }; let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.cpu_init(); lapic.cpu_init();
// enable FPU, the manual Volume 3 Chapter 13 // enable FPU, the manual Volume 3 Chapter 13

@ -7,6 +7,7 @@ pub fn init() {
use crate::arch::interrupt::consts; use crate::arch::interrupt::consts;
use crate::arch::interrupt::enable_irq; use crate::arch::interrupt::enable_irq;
enable_irq(consts::Keyboard); enable_irq(consts::Keyboard);
info!("keyboard: init end");
} }
/// Receive character from keyboard /// Receive character from keyboard

@ -12,6 +12,7 @@ pub fn init() {
COM2.lock().init(); COM2.lock().init();
enable_irq(consts::COM1); enable_irq(consts::COM1);
enable_irq(consts::COM2); enable_irq(consts::COM2);
info!("serial: init end");
} }
pub trait SerialRead { pub trait SerialRead {

@ -6,7 +6,7 @@ use spin::Mutex;
use volatile::Volatile; use volatile::Volatile;
use x86_64::instructions::port::Port; use x86_64::instructions::port::Port;
use crate::consts::KERNEL_OFFSET; use crate::memory::phys_to_virt;
use crate::util::color::ConsoleColor; use crate::util::color::ConsoleColor;
use crate::util::escape_parser::{EscapeParser, CSI}; use crate::util::escape_parser::{EscapeParser, CSI};
@ -99,10 +99,9 @@ impl VgaBuffer {
} }
lazy_static! { lazy_static! {
pub static ref VGA_WRITER: Mutex<VgaWriter> = Mutex::new( pub static ref VGA_WRITER: Mutex<VgaWriter> = Mutex::new(VgaWriter::new(unsafe {
// VGA virtual address is specified at bootloader &mut *((phys_to_virt(0xb8000)) as *mut VgaBuffer)
VgaWriter::new(unsafe{ &mut *((KERNEL_OFFSET + 0xf0000000) as *mut VgaBuffer) }) }));
);
} }
pub struct VgaWriter { pub struct VgaWriter {

@ -48,10 +48,7 @@ impl Cpu {
} }
pub fn iter() -> impl Iterator<Item = &'static Self> { pub fn iter() -> impl Iterator<Item = &'static Self> {
unsafe { unsafe { CPUS.iter().filter_map(|x| x.as_ref()) }
CPUS.iter()
.filter_map(|x| x.as_ref())
}
} }
pub fn id(&self) -> usize { pub fn id(&self) -> usize {
self.id self.id
@ -114,7 +111,7 @@ const KCODE: Descriptor = Descriptor::UserSegment(0x0020980000000000); // EXECUT
const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE
const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT
const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
// Copied from xv6 // Copied from xv6
const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT
const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT

@ -147,6 +147,17 @@ fn page_fault(tf: &mut TrapFrame) {
if crate::memory::handle_page_fault(addr) { if crate::memory::handle_page_fault(addr) {
return; return;
} }
extern "C" {
fn _copy_user_start();
fn _copy_user_end();
}
if tf.rip >= _copy_user_start as usize && tf.rip < _copy_user_end as usize {
debug!("fixup for addr {:x?}", addr);
tf.rip = crate::memory::read_user_fixup as usize;
return;
}
error!("\nEXCEPTION: Page Fault @ {:#x}, code: {:?}", addr, code); error!("\nEXCEPTION: Page Fault @ {:#x}, code: {:?}", addr, code);
error(tf); error(tf);
} }

@ -5,7 +5,7 @@ mod trapframe;
pub use self::handler::*; pub use self::handler::*;
pub use self::trapframe::*; pub use self::trapframe::*;
use crate::consts::KERNEL_OFFSET; use crate::memory::phys_to_virt;
use apic::*; use apic::*;
#[inline(always)] #[inline(always)]
@ -39,12 +39,12 @@ pub fn no_interrupt(f: impl FnOnce()) {
#[inline(always)] #[inline(always)]
pub fn enable_irq(irq: u8) { pub fn enable_irq(irq: u8) {
let mut ioapic = unsafe { IoApic::new(KERNEL_OFFSET + IOAPIC_ADDR as usize) }; let mut ioapic = unsafe { IoApic::new(phys_to_virt(IOAPIC_ADDR as usize)) };
ioapic.enable(irq, 0); ioapic.enable(irq, 0);
} }
#[inline(always)] #[inline(always)]
pub fn ack(_irq: u8) { pub fn ack(_irq: u8) {
let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) }; let mut lapic = unsafe { XApic::new(phys_to_virt(LAPIC_ADDR)) };
lapic.eoi(); lapic.eoi();
} }

@ -1,7 +1,6 @@
use crate::memory::phys_to_virt;
/// Interface for inter-processor interrupt. /// Interface for inter-processor interrupt.
/// This module wraps inter-processor interrupt into a broadcast-calling style. /// This module wraps inter-processor interrupt into a broadcast-calling style.
use crate::consts::KERNEL_OFFSET;
use alloc::boxed::{Box, FnBox}; use alloc::boxed::{Box, FnBox};
use alloc::sync::Arc; use alloc::sync::Arc;
use apic::{LocalApic, XApic, LAPIC_ADDR}; use apic::{LocalApic, XApic, LAPIC_ADDR};
@ -10,7 +9,7 @@ use core::sync::atomic::{spin_loop_hint, AtomicU8, Ordering};
pub type IPIEventItem = Box<FnBox()>; pub type IPIEventItem = Box<FnBox()>;
unsafe fn get_apic() -> XApic { unsafe fn get_apic() -> XApic {
let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) }; let mut lapic = unsafe { XApic::new(phys_to_virt(LAPIC_ADDR)) };
lapic lapic
} }

@ -14,6 +14,9 @@ SECTIONS {
.text ALIGN(4K): .text ALIGN(4K):
{ {
stext = .; stext = .;
_copy_user_start = .;
*(.text.copy_user)
_copy_user_end = .;
*(.text .text.*) *(.text .text.*)
etext = .; etext = .;
} }

@ -1,15 +1,10 @@
use crate::consts::KERNEL_OFFSET;
use bitmap_allocator::BitAlloc;
// Depends on kernel
use super::{BootInfo, MemoryRegionType}; use super::{BootInfo, MemoryRegionType};
use crate::memory::{active_table, init_heap, FRAME_ALLOCATOR}; use crate::memory::{init_heap, FRAME_ALLOCATOR};
use log::*; use bitmap_allocator::BitAlloc;
use rcore_memory::paging::*; use rcore_memory::paging::*;
use rcore_memory::PAGE_SIZE;
pub fn init(boot_info: &BootInfo) { pub fn init(boot_info: &BootInfo) {
init_frame_allocator(boot_info); init_frame_allocator(boot_info);
init_device_vm_map();
init_heap(); init_heap();
info!("memory: init end"); info!("memory: init end");
} }
@ -25,15 +20,3 @@ fn init_frame_allocator(boot_info: &BootInfo) {
} }
} }
} }
fn init_device_vm_map() {
let mut page_table = active_table();
// IOAPIC
page_table
.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000)
.update();
// LocalAPIC
page_table
.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000)
.update();
}

@ -9,6 +9,7 @@ pub mod gdt;
pub mod idt; pub mod idt;
pub mod interrupt; pub mod interrupt;
pub mod io; pub mod io;
pub mod ipi;
pub mod memory; pub mod memory;
pub mod paging; pub mod paging;
pub mod rand; pub mod rand;
@ -26,16 +27,23 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
println!("Hello world! from CPU {}!", cpu_id); println!("Hello world! from CPU {}!", cpu_id);
if cpu_id != 0 { if cpu_id != 0 {
while !AP_CAN_INIT.load(Ordering::Relaxed) {} while !AP_CAN_INIT.load(Ordering::Relaxed) {
spin_loop_hint();
}
other_start(); other_start();
} }
// First init log mod, so that we can print log info. // First init log mod, so that we can print log info.
crate::logging::init(); crate::logging::init();
info!("{:#?}", boot_info); info!("{:#x?}", boot_info);
assert_eq!(
boot_info.physical_memory_offset as usize,
consts::PHYSICAL_MEMORY_OFFSET
);
// Init trap handling. // Init trap handling.
idt::init(); idt::init();
// setup fast syscall in x86_64
interrupt::fast_syscall::init(); interrupt::fast_syscall::init();
// Init physical memory management and heap. // Init physical memory management and heap.
@ -63,14 +71,14 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
/// The entry point for other processors /// The entry point for other processors
fn other_start() -> ! { fn other_start() -> ! {
// Init trap handling. // init trap handling.
idt::init(); idt::init();
// init gdt // init gdt
gdt::init(); gdt::init();
// init local apic // init local apic
cpu::init(); cpu::init();
// setup fast syscall in xv6-64 // setup fast syscall in x86_64
interrupt::fast_syscall::init(); interrupt::fast_syscall::init();
//call the first main function in kernel. // call the first main function in kernel.
crate::kmain(); crate::kmain();
} }

@ -1,6 +1,4 @@
// Depends on kernel use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use crate::consts::KERNEL_OFFSET;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use core::sync::atomic::Ordering; use core::sync::atomic::Ordering;
use log::*; use log::*;
use rcore_memory::paging::*; use rcore_memory::paging::*;
@ -8,12 +6,12 @@ use x86_64::instructions::tlb;
use x86_64::registers::control::{Cr3, Cr3Flags}; use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{ use x86_64::structures::paging::{
frame::PhysFrame as Frame, frame::PhysFrame as Frame,
mapper::{Mapper, RecursivePageTable}, mapper::{MappedPageTable, Mapper},
page::{Page, PageRange, Size4KiB}, page::{Page, PageRange, Size4KiB},
page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF}, page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator, FrameAllocator, FrameDeallocator,
}; };
use x86_64::{VirtAddr, PhysAddr}; use x86_64::{PhysAddr, VirtAddr};
pub trait PageExt { pub trait PageExt {
fn of_addr(address: usize) -> Self; fn of_addr(address: usize) -> Self;
@ -40,11 +38,15 @@ impl FrameExt for Frame {
} }
} }
pub struct ActivePageTable(RecursivePageTable<'static>); pub struct PageTableImpl(
MappedPageTable<'static, fn(Frame) -> *mut x86PageTable>,
PageEntry,
Frame,
);
pub struct PageEntry(PageTableEntry); pub struct PageEntry(&'static mut PageTableEntry, Page, Frame);
impl PageTable for ActivePageTable { impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE; let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
unsafe { unsafe {
@ -59,7 +61,7 @@ impl PageTable for ActivePageTable {
.flush(); .flush();
} }
flush_tlb_all(addr); flush_tlb_all(addr);
unsafe { &mut *(get_entry_ptr(addr, 1)) } self.get_entry(addr).unwrap()
} }
fn unmap(&mut self, addr: usize) { fn unmap(&mut self, addr: usize) {
@ -68,33 +70,39 @@ impl PageTable for ActivePageTable {
} }
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 { let mut page_table = frame_to_page_table(self.2);
let entry = get_entry_ptr(addr, 4 - level); for level in 0..4 {
if unsafe { !(*entry).present() } { let index = (addr >> (12 + (3 - level) * 9)) & 0o777;
let entry = unsafe { &mut (&mut *page_table)[index] };
if level == 3 {
let page = Page::of_addr(addr);
self.1 = PageEntry(entry, page, self.2);
return Some(&mut self.1 as &mut Entry);
}
if !entry.flags().contains(EF::PRESENT) {
return None; return None;
} }
page_table = frame_to_page_table(entry.frame().unwrap());
} }
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) } unreachable!();
} }
}
impl PageTableExt for ActivePageTable { fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
// FIXME: the default value 0xcafebe000 is so low that allocation might overwrite it sometimes. let frame = self.0.translate_page(Page::of_addr(addr)).unwrap();
// However, putting it to KERNEL_OFFSET | 0xcafeb000 has unintended effects. let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
// Someone needs to reconsider this and use an ultimate solution. unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
// const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000; }
} }
impl ActivePageTable { fn frame_to_page_table(frame: Frame) -> *mut x86PageTable {
pub unsafe fn new() -> Self { let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap()) vaddr as *mut x86PageTable
}
} }
impl Entry for PageEntry { impl Entry for PageEntry {
fn update(&mut self) { fn update(&mut self) {
use x86_64::{instructions::tlb::flush, VirtAddr}; use x86_64::{instructions::tlb::flush, VirtAddr};
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9); let addr = self.1.start_address();
flush(addr); flush(addr);
flush_tlb_all(addr.as_u64() as usize); flush_tlb_all(addr.as_u64() as usize);
} }
@ -153,14 +161,18 @@ impl Entry for PageEntry {
self.0.flags().contains(EF::USER_ACCESSIBLE) self.0.flags().contains(EF::USER_ACCESSIBLE)
} }
fn set_user(&mut self, value: bool) { fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::USER_ACCESSIBLE, value); // x86_64 page table struct do not implement setting USER bit
if value { if value {
let mut addr = self as *const _ as usize; let mut page_table = frame_to_page_table(self.2);
for _ in 0..3 { for level in 0..4 {
// Upper level entry let index =
addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000; (self.1.start_address().as_u64() as usize >> (12 + (3 - level) * 9)) & 0o777;
// set USER_ACCESSIBLE let entry = unsafe { &mut (&mut *page_table)[index] };
unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) }; entry.set_flags(entry.flags() | EF::USER_ACCESSIBLE);
if level == 3 {
return;
}
page_table = frame_to_page_table(entry.frame().unwrap());
} }
} }
} }
@ -176,51 +188,57 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {} fn set_mmio(&mut self, _value: u8) {}
} }
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
debug_assert!(level <= 4);
let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1);
entry_addr as *mut PageEntry
}
impl PageEntry { impl PageEntry {
fn as_flags(&mut self) -> &mut EF { fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) } unsafe { &mut *(self.0 as *mut _ as *mut EF) }
} }
} }
#[derive(Debug)] impl PageTableImpl {
pub struct InactivePageTable0 { /// Unsafely get the current active page table.
p4_frame: Frame, /// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Cr3::read().0;
let table = unsafe { &mut *frame_to_page_table(frame) };
PageTableImpl(
MappedPageTable::new(table, frame_to_page_table),
core::mem::uninitialized(),
frame,
)
}
} }
impl InactivePageTable for InactivePageTable0 { impl PageTableExt for PageTableImpl {
type Active = ActivePageTable;
fn new_bare() -> Self { fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame"); let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target); let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| { let table = unsafe { &mut *frame_to_page_table(frame) };
table.zero(); table.zero();
// set up recursive mapping for the table unsafe {
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE); PageTableImpl(
}); MappedPageTable::new(table, frame_to_page_table),
InactivePageTable0 { p4_frame: frame } core::mem::uninitialized(),
frame,
)
}
} }
fn map_kernel(&mut self) { fn map_kernel(&mut self) {
let table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) }; let table = unsafe { &mut *frame_to_page_table(Cr3::read().0) };
// Kernel at 0xffff_ff00_0000_0000 // Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate) // Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone(); let ekernel = table[510].clone();
let ephysical = table[0x1f8].clone();
let estack = table[175].clone(); let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL); let table = unsafe { &mut *frame_to_page_table(self.2) };
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL); table[510].set_addr(ekernel.addr(), ekernel.flags() | EF::GLOBAL);
}); table[0x1f8].set_addr(ephysical.addr(), ephysical.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
} }
fn token(&self) -> usize { fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3 self.2.start_address().as_u64() as usize // as CR3
} }
unsafe fn set_token(token: usize) { unsafe fn set_token(token: usize) {
@ -237,40 +255,18 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() { fn flush_tlb() {
tlb::flush_all(); tlb::flush_all();
} }
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = Cr3::read().0.start_address().as_u64() as usize;
if self.p4_frame == Cr3::read().0 {
return f(&mut active_table());
}
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
ret
})
}
} }
impl Drop for InactivePageTable0 { impl Drop for PageTableImpl {
fn drop(&mut self) { fn drop(&mut self) {
info!("PageTable dropping: {:?}", self); info!("PageTable dropping: {:?}", self.2);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize); dealloc_frame(self.2.start_address().as_u64() as usize);
} }
} }
struct FrameAllocatorForX86; struct FrameAllocatorForX86;
impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 { unsafe impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
fn allocate_frame(&mut self) -> Option<Frame> { fn allocate_frame(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr)) alloc_frame().map(|addr| Frame::of_addr(addr))
} }
@ -284,11 +280,10 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
/// Flush TLB for `vaddr` on all CPU /// Flush TLB for `vaddr` on all CPU
fn flush_tlb_all(vaddr: usize) { fn flush_tlb_all(vaddr: usize) {
// FIXME: too slow, disable now.
return;
if !super::AP_CAN_INIT.load(Ordering::Relaxed) { if !super::AP_CAN_INIT.load(Ordering::Relaxed) {
return; return;
} }
super::ipi::invoke_on_allcpu( super::ipi::invoke_on_allcpu(move || tlb::flush(VirtAddr::new(vaddr as u64)), false);
move || tlb::flush(VirtAddr::new(vaddr as u64)),
false,
);
} }

@ -8,16 +8,16 @@ use bitflags::*;
use device_tree::util::SliceRead; use device_tree::util::SliceRead;
use device_tree::Node; use device_tree::Node;
use log::*; use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use volatile::Volatile; use volatile::Volatile;
use crate::arch::consts::PHYSICAL_MEMORY_OFFSET;
use crate::drivers::BlockDriver; use crate::drivers::BlockDriver;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*; use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS}; use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIOBlk { pub struct VirtIOBlk {
interrupt_parent: u32, interrupt_parent: u32,
@ -106,8 +106,6 @@ impl Driver for VirtIOBlkDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool { fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock(); let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) }; let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read(); let interrupt = header.interrupt_status.read();
if interrupt != 0 { if interrupt != 0 {
@ -127,9 +125,6 @@ impl Driver for VirtIOBlkDriver {
fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool { fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool {
let mut driver = self.0.lock(); let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req = VirtIOBlkReadReq::default(); let mut req = VirtIOBlkReadReq::default();
req.req_type = VIRTIO_BLK_T_IN; req.req_type = VIRTIO_BLK_T_IN;
req.reserved = 0; req.reserved = 0;
@ -155,9 +150,6 @@ impl Driver for VirtIOBlkDriver {
fn write_block(&self, block_id: usize, buf: &[u8]) -> bool { fn write_block(&self, block_id: usize, buf: &[u8]) -> bool {
let mut driver = self.0.lock(); let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req: VirtIOBlkWriteReq = unsafe { zeroed() }; let mut req: VirtIOBlkWriteReq = unsafe { zeroed() };
req.req_type = VIRTIO_BLK_T_OUT; req.req_type = VIRTIO_BLK_T_OUT;
req.reserved = 0; req.reserved = 0;
@ -184,8 +176,9 @@ impl Driver for VirtIOBlkDriver {
pub fn virtio_blk_init(node: &Node) { pub fn virtio_blk_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap(); let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap(); let paddr = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) }; let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits()); header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -199,7 +192,7 @@ pub fn virtio_blk_init(node: &Node) {
header.write_driver_features(driver_features); header.write_driver_features(driver_features);
// read configuration space // read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) }; let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
info!("Config: {:?}", config); info!("Config: {:?}", config);
info!( info!(
"Found a block device of size {}KB", "Found a block device of size {}KB",
@ -213,7 +206,7 @@ pub fn virtio_blk_init(node: &Node) {
let driver = VirtIOBlkDriver(Mutex::new(VirtIOBlk { let driver = VirtIOBlkDriver(Mutex::new(VirtIOBlk {
interrupt: node.prop_u32("interrupts").unwrap(), interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(), interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize, header: vaddr as usize,
queue: VirtIOVirtqueue::new(header, 0, 16), queue: VirtIOVirtqueue::new(header, 0, 16),
capacity: config.capacity.read() as usize, capacity: config.capacity.read() as usize,
})); }));

@ -1,12 +1,11 @@
use crate::consts::KERNEL_OFFSET;
use crate::drivers::block::*; use crate::drivers::block::*;
use crate::drivers::net::*; use crate::drivers::net::*;
use crate::drivers::{Driver, DRIVERS, NET_DRIVERS}; use crate::drivers::{Driver, DRIVERS, NET_DRIVERS};
use crate::memory::active_table; use crate::memory::phys_to_virt;
use alloc::collections::BTreeMap; use alloc::collections::BTreeMap;
use alloc::sync::Arc; use alloc::sync::Arc;
use pci::*; use pci::*;
use rcore_memory::{paging::PageTable, PAGE_SIZE}; use rcore_memory::PAGE_SIZE;
use spin::Mutex; use spin::Mutex;
const PCI_COMMAND: u16 = 0x04; const PCI_COMMAND: u16 = 0x04;
@ -141,12 +140,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82574L Gigabit Network Connection // 82574L Gigabit Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] { if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) }; let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize; let vaddr = phys_to_virt(addr as usize);
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let index = NET_DRIVERS.read().len(); let index = NET_DRIVERS.read().len();
e1000::init(name, irq, vaddr, len as usize, index); e1000::init(name, irq, vaddr, len as usize, index);
} }
@ -155,12 +149,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82599ES 10-Gigabit SFI/SFP+ Network Connection // 82599ES 10-Gigabit SFI/SFP+ Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] { if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) }; let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize; let vaddr = phys_to_virt(addr as usize);
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let index = NET_DRIVERS.read().len(); let index = NET_DRIVERS.read().len();
PCI_DRIVERS.lock().insert( PCI_DRIVERS.lock().insert(
dev.loc, dev.loc,
@ -173,8 +162,7 @@ pub fn init_driver(dev: &PCIDevice) {
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[5] { if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[5] {
let irq = unsafe { enable(dev.loc) }; let irq = unsafe { enable(dev.loc) };
assert!(len as usize <= PAGE_SIZE); assert!(len as usize <= PAGE_SIZE);
let vaddr = KERNEL_OFFSET + addr as usize; let vaddr = phys_to_virt(addr as usize);
active_table().map(vaddr, addr as usize);
PCI_DRIVERS PCI_DRIVERS
.lock() .lock()
.insert(dev.loc, ahci::init(irq, vaddr, len as usize)); .insert(dev.loc, ahci::init(irq, vaddr, len as usize));

@ -8,18 +8,16 @@ use bitflags::*;
use device_tree::util::SliceRead; use device_tree::util::SliceRead;
use device_tree::Node; use device_tree::Node;
use log::*; use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly}; use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::active_table;
use crate::HEAP_ALLOCATOR; use crate::HEAP_ALLOCATOR;
use super::super::block::virtio_blk; use super::super::block::virtio_blk;
use super::super::gpu::virtio_gpu; use super::super::gpu::virtio_gpu;
use super::super::input::virtio_input; use super::super::input::virtio_input;
use super::super::net::virtio_net; use super::super::net::virtio_net;
use crate::memory::{phys_to_virt, virt_to_phys};
// virtio 4.2.4 Legacy interface // virtio 4.2.4 Legacy interface
#[repr(C)] #[repr(C)]
@ -85,10 +83,10 @@ impl VirtIOVirtqueue {
assert_eq!(header.queue_pfn.read(), 0); // not in use assert_eq!(header.queue_pfn.read(), 0); // not in use
let queue_num_max = header.queue_num_max.read(); let queue_num_max = header.queue_num_max.read();
assert!(queue_num_max >= queue_num as u32); // queue available assert!(queue_num_max >= queue_num as u32); // queue available
assert!(queue_num & (queue_num - 1) == 0); // power of two assert_eq!(queue_num & (queue_num - 1), 0); // power of two
let align = PAGE_SIZE; let align = PAGE_SIZE;
let size = virtqueue_size(queue_num, align); let size = virtqueue_size(queue_num, align);
assert!(size % align == 0); assert_eq!(size % align, 0);
// alloc continuous pages // alloc continuous pages
let address = let address =
unsafe { HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, align).unwrap()) } unsafe { HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, align).unwrap()) }
@ -96,9 +94,7 @@ impl VirtIOVirtqueue {
header.queue_num.write(queue_num as u32); header.queue_num.write(queue_num as u32);
header.queue_align.write(align as u32); header.queue_align.write(align as u32);
header header.queue_pfn.write((virt_to_phys(address) as u32) >> 12);
.queue_pfn
.write(((address - KERNEL_OFFSET + MEMORY_OFFSET) as u32) >> 12);
// link desc together // link desc together
let desc = let desc =
@ -146,7 +142,7 @@ impl VirtIOVirtqueue {
desc[cur].flags.write(VirtIOVirtqueueFlag::NEXT.bits()); desc[cur].flags.write(VirtIOVirtqueueFlag::NEXT.bits());
desc[cur] desc[cur]
.addr .addr
.write(output[i].as_ptr() as u64 - KERNEL_OFFSET as u64 + MEMORY_OFFSET as u64); .write(virt_to_phys(output[i].as_ptr() as usize) as u64);
desc[cur].len.write(output[i].len() as u32); desc[cur].len.write(output[i].len() as u32);
prev = cur; prev = cur;
cur = desc[cur].next.read() as usize; cur = desc[cur].next.read() as usize;
@ -157,7 +153,7 @@ impl VirtIOVirtqueue {
.write((VirtIOVirtqueueFlag::NEXT | VirtIOVirtqueueFlag::WRITE).bits()); .write((VirtIOVirtqueueFlag::NEXT | VirtIOVirtqueueFlag::WRITE).bits());
desc[cur] desc[cur]
.addr .addr
.write(input[i].as_ptr() as u64 - KERNEL_OFFSET as u64 + MEMORY_OFFSET as u64); .write(virt_to_phys(input[i].as_ptr() as usize) as u64);
desc[cur].len.write(input[i].len() as u32); desc[cur].len.write(input[i].len() as u32);
prev = cur; prev = cur;
cur = desc[cur].next.read() as usize; cur = desc[cur].next.read() as usize;
@ -222,7 +218,7 @@ impl VirtIOVirtqueue {
let mut output = Vec::new(); let mut output = Vec::new();
loop { loop {
let flags = VirtIOVirtqueueFlag::from_bits_truncate(desc[cur].flags.read()); let flags = VirtIOVirtqueueFlag::from_bits_truncate(desc[cur].flags.read());
let addr = desc[cur].addr.read() as u64 - MEMORY_OFFSET as u64 + KERNEL_OFFSET as u64; let addr = phys_to_virt(desc[cur].addr.read() as usize);
let buffer = let buffer =
unsafe { slice::from_raw_parts(addr as *const u8, desc[cur].len.read() as usize) }; unsafe { slice::from_raw_parts(addr as *const u8, desc[cur].len.read() as usize) };
if flags.contains(VirtIOVirtqueueFlag::WRITE) { if flags.contains(VirtIOVirtqueueFlag::WRITE) {
@ -265,7 +261,7 @@ impl VirtIOVirtqueue {
} }
} }
pub const VIRTIO_CONFIG_SPACE_OFFSET: u64 = 0x100; pub const VIRTIO_CONFIG_SPACE_OFFSET: usize = 0x100;
impl VirtIOHeader { impl VirtIOHeader {
pub fn read_device_features(&mut self) -> u64 { pub fn read_device_features(&mut self) -> u64 {
@ -354,12 +350,13 @@ pub fn virtqueue_used_elem_offset(num: usize, align: usize) -> usize {
pub fn virtio_probe(node: &Node) { pub fn virtio_probe(node: &Node) {
if let Some(reg) = node.prop_raw("reg") { if let Some(reg) = node.prop_raw("reg") {
let from = reg.as_slice().read_be_u64(0).unwrap(); let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
debug!("walk dt {:x} {:x}", paddr, vaddr);
let size = reg.as_slice().read_be_u64(8).unwrap(); let size = reg.as_slice().read_be_u64(8).unwrap();
// assuming one page // assuming one page
assert_eq!(size as usize, PAGE_SIZE); assert_eq!(size as usize, PAGE_SIZE);
active_table().map(from as usize, from as usize); let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let magic = header.magic.read(); let magic = header.magic.read();
let version = header.version.read(); let version = header.version.read();
let device_id = header.device_id.read(); let device_id = header.device_id.read();
@ -374,23 +371,13 @@ pub fn virtio_probe(node: &Node) {
// virtio 3.1.1 Device Initialization // virtio 3.1.1 Device Initialization
header.status.write(0); header.status.write(0);
header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits()); header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits());
if device_id == 1 { match device_id {
// net device 1 => virtio_net::virtio_net_init(node),
virtio_net::virtio_net_init(node); 2 => virtio_blk::virtio_blk_init(node),
} else if device_id == 2 { 16 => virtio_gpu::virtio_gpu_init(node),
// blk device 18 => virtio_input::virtio_input_init(node),
virtio_blk::virtio_blk_init(node); _ => warn!("Unrecognized virtio device {}", device_id),
} else if device_id == 16 {
// gpu device
virtio_gpu::virtio_gpu_init(node);
} else if device_id == 18 {
// input device
virtio_input::virtio_input_init(node);
} else {
println!("Unrecognized virtio device {}", device_id);
} }
} else {
active_table().unmap(from as usize);
} }
} }
} }

@ -184,6 +184,12 @@ impl<F: Font> Console<F> {
self.buf.delete(self.row, self.col); self.buf.delete(self.row, self.col);
} }
} }
b'\t' => {
self.write_byte(b' ');
while self.col % 8 != 0 {
self.write_byte(b' ');
}
}
b'\n' => self.new_line(), b'\n' => self.new_line(),
b'\r' => self.col = 0, b'\r' => self.col = 0,
b'\x1b' => self.parser.start_parse(), b'\x1b' => self.parser.start_parse(),

@ -4,6 +4,7 @@ use core::slice;
use device_tree::{DeviceTree, Node}; use device_tree::{DeviceTree, Node};
use super::bus::virtio_mmio::virtio_probe; use super::bus::virtio_mmio::virtio_probe;
use super::net::router::router_init;
use super::CMDLINE; use super::CMDLINE;
const DEVICE_TREE_MAGIC: u32 = 0xd00dfeed; const DEVICE_TREE_MAGIC: u32 = 0xd00dfeed;
@ -14,6 +15,9 @@ fn walk_dt_node(dt: &Node) {
if compatible == "virtio,mmio" { if compatible == "virtio,mmio" {
virtio_probe(dt); virtio_probe(dt);
} }
if compatible == "rcore,router" {
router_init();
}
// TODO: initial other devices (16650, etc.) // TODO: initial other devices (16650, etc.)
} }
if let Ok(bootargs) = dt.prop_str("bootargs") { if let Ok(bootargs) = dt.prop_str("bootargs") {

@ -7,19 +7,18 @@ use bitflags::*;
use device_tree::util::SliceRead; use device_tree::util::SliceRead;
use device_tree::Node; use device_tree::Node;
use log::*; use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly}; use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::arch::cpu; use crate::arch::cpu;
use crate::memory::active_table; use crate::memory::virt_to_phys;
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR; use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*; use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS}; use super::super::{DeviceType, Driver, DRIVERS};
use super::test::mandelbrot; use super::test::mandelbrot;
use crate::memory::phys_to_virt;
const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0; const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
@ -198,11 +197,6 @@ impl Driver for VirtIOGpuDriver {
let mut driver = self.0.lock(); let mut driver = self.0.lock();
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = &mut driver.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = driver.header.interrupt_status.read(); let interrupt = driver.header.interrupt_status.read();
if interrupt != 0 { if interrupt != 0 {
driver.header.interrupt_ack.write(interrupt); driver.header.interrupt_ack.write(interrupt);
@ -285,7 +279,7 @@ fn setup_framebuffer(driver: &mut VirtIOGpu) {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING), header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
resource_id: VIRTIO_GPU_RESOURCE_ID, resource_id: VIRTIO_GPU_RESOURCE_ID,
nr_entries: 1, nr_entries: 1,
addr: (frame_buffer - KERNEL_OFFSET + MEMORY_OFFSET) as u64, addr: virt_to_phys(frame_buffer) as u64,
length: size, length: size,
padding: 0, padding: 0,
}; };
@ -350,8 +344,9 @@ fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
pub fn virtio_gpu_init(node: &Node) { pub fn virtio_gpu_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap(); let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap(); let paddr = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) }; let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits()); header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -365,7 +360,7 @@ pub fn virtio_gpu_init(node: &Node) {
header.write_driver_features(driver_features); header.write_driver_features(driver_features);
// read configuration space // read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) }; let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
info!("Config: {:?}", config); info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface // virtio 4.2.4 Legacy interface

@ -16,11 +16,11 @@ use rcore_memory::PAGE_SIZE;
use volatile::Volatile; use volatile::Volatile;
use crate::arch::cpu; use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*; use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS}; use super::super::{DeviceType, Driver, DRIVERS};
use crate::memory::phys_to_virt;
struct VirtIOInput { struct VirtIOInput {
interrupt_parent: u32, interrupt_parent: u32,
@ -125,11 +125,6 @@ impl VirtIOInput {
return false; return false;
} }
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = self.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = self.header.interrupt_status.read(); let interrupt = self.header.interrupt_status.read();
if interrupt != 0 { if interrupt != 0 {
self.header.interrupt_ack.write(interrupt); self.header.interrupt_ack.write(interrupt);
@ -173,8 +168,9 @@ impl Driver for VirtIOInputDriver {
pub fn virtio_input_init(node: &Node) { pub fn virtio_input_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap(); let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap(); let paddr = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) }; let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits()); header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -188,7 +184,7 @@ pub fn virtio_input_init(node: &Node) {
header.write_driver_features(driver_features); header.write_driver_features(driver_features);
// read configuration space // read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) }; let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
info!("Config: {:?}", config); info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface // virtio 4.2.4 Legacy interface

@ -1,3 +1,4 @@
pub mod e1000; pub mod e1000;
pub mod ixgbe; pub mod ixgbe;
pub mod router;
pub mod virtio_net; pub mod virtio_net;

@ -0,0 +1,228 @@
//! rCore Router Driver
use alloc::collections::BTreeMap;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use smoltcp::iface::*;
use smoltcp::phy::{self, DeviceCapabilities};
use smoltcp::time::Instant;
use smoltcp::wire::*;
use smoltcp::Result;
use rcore_memory::PAGE_SIZE;
use crate::drivers::provider::Provider;
use crate::net::SOCKETS;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS, SOCKET_ACTIVITY};
use crate::memory::phys_to_virt;
// AXI Stream FIFO MMIO register map, base physical address 0x1820_0000,
// mapped into the kernel's linear region via `phys_to_virt`.
// Register names follow the Xilinx AXI4-Stream FIFO convention — TODO(review):
// confirm offsets against the board's device tree / FIFO IP configuration.
const AXI_STREAM_FIFO_ISR: *mut u32 = phys_to_virt(0x1820_0000) as *mut u32; // interrupt status (read, then write bits back to clear)
const AXI_STREAM_FIFO_IER: *mut u32 = phys_to_virt(0x1820_0004) as *mut u32; // interrupt enable (bit 26 enables receive-complete below)
const AXI_STREAM_FIFO_TDFR: *mut u32 = phys_to_virt(0x1820_0008) as *mut u32; // transmit FIFO reset (write 0xA5)
const AXI_STREAM_FIFO_TDFD: *mut u32 = phys_to_virt(0x1820_0010) as *mut u32; // transmit data: one payload byte per u32 word written
const AXI_STREAM_FIFO_TLR: *mut u32 = phys_to_virt(0x1820_0014) as *mut u32; // transmit length: commits the queued bytes
const AXI_STREAM_FIFO_RDFR: *mut u32 = phys_to_virt(0x1820_0018) as *mut u32; // receive FIFO reset (write 0xA5)
const AXI_STREAM_FIFO_RDFO: *mut u32 = phys_to_virt(0x1820_001C) as *mut u32; // receive FIFO occupancy (number of words pending)
const AXI_STREAM_FIFO_RDFD: *mut u32 = phys_to_virt(0x1820_0020) as *mut u32; // receive data: one payload byte per u32 word read
const AXI_STREAM_FIFO_RLR: *mut u32 = phys_to_virt(0x1820_0024) as *mut u32; // receive length register (read once per packet; value unused here)
const AXI_STREAM_FIFO_TDR: *mut u32 = phys_to_virt(0x1820_002C) as *mut u32; // transmit destination (written 2 before sending — presumably a port id; verify)
const AXI_STREAM_FIFO_RDR: *mut u32 = phys_to_virt(0x1820_0030) as *mut u32; // receive destination (read once per packet; value unused here)
/// Software-side state of the rCore router device: packets pulled out of
/// the receive FIFO by the interrupt handler, waiting for smoltcp to
/// consume them. (Packets are popped LIFO from the end of the vector.)
pub struct Router {
    // Each inner Vec<u8> is one complete received frame.
    buffer: Vec<Vec<u8>>,
}

impl Router {
    /// Whether a frame may be transmitted right now. The transmit FIFO is
    /// assumed to always have room, so this is unconditionally true.
    fn transmit_available(&self) -> bool {
        true
    }

    /// Whether at least one received frame is queued for consumption.
    fn receive_available(&self) -> bool {
        // Idiomatic emptiness check (was `self.buffer.len() > 0`).
        !self.buffer.is_empty()
    }
}
/// Cheaply cloneable handle to the shared [`Router`] state; the smoltcp
/// `phy::Device` implementation lives on this wrapper.
#[derive(Clone)]
pub struct RouterDriver(Arc<Mutex<Router>>);

/// One-shot token granting a single receive operation (`phy::RxToken`).
pub struct RouterRxToken(RouterDriver);
/// One-shot token granting a single transmit operation (`phy::TxToken`).
pub struct RouterTxToken(RouterDriver);
/// smoltcp device glue for the router, backed by the AXI stream FIFO.
impl<'a> phy::Device<'a> for RouterDriver {
    type RxToken = RouterRxToken;
    type TxToken = RouterTxToken;

    /// Hand out an rx/tx token pair when a frame is queued and a reply
    /// could also be sent; otherwise signal "nothing to receive".
    fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
        let inner = self.0.lock();
        if !(inner.transmit_available() && inner.receive_available()) {
            return None;
        }
        // potential racing
        Some((RouterRxToken(self.clone()), RouterTxToken(self.clone())))
    }

    /// Hand out a transmit token when the device can accept a frame.
    fn transmit(&'a mut self) -> Option<Self::TxToken> {
        let inner = self.0.lock();
        match inner.transmit_available() {
            true => Some(RouterTxToken(self.clone())),
            false => None,
        }
    }

    /// Static link capabilities: Ethernet-sized MTU, one frame per burst.
    fn capabilities(&self) -> DeviceCapabilities {
        let mut capabilities = DeviceCapabilities::default();
        capabilities.max_transmission_unit = 1536;
        capabilities.max_burst_size = Some(1);
        capabilities
    }
}
impl phy::RxToken for RouterRxToken {
    /// Pop the most recently queued frame and hand it to `f`.
    ///
    /// NOTE(review): the router lock is intentionally held while `f` runs,
    /// matching the surrounding driver's locking discipline. Panics if
    /// called with an empty queue (the `Device::receive` guard prevents
    /// that in normal operation).
    fn consume<R, F>(self, _timestamp: Instant, f: F) -> Result<R>
    where
        F: FnOnce(&[u8]) -> Result<R>,
    {
        let mut inner = (self.0).0.lock();
        let frame = inner.buffer.pop().unwrap();
        f(&frame)
    }
}
impl phy::TxToken for RouterTxToken {
    /// Let `f` fill a `len`-byte scratch buffer, then push it to the
    /// transmit FIFO one byte per 32-bit word and commit the transfer.
    fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> Result<R>
    where
        F: FnOnce(&mut [u8]) -> Result<R>,
    {
        let mut buffer = vec![0; len];
        let res = f(&mut buffer);
        debug!("out buf {}", len);
        unsafe {
            // Select the transmit destination before queueing data.
            AXI_STREAM_FIFO_TDR.write_volatile(2);
            // Each FIFO word carries a single payload byte.
            for byte in buffer {
                AXI_STREAM_FIFO_TDFD.write_volatile(byte as u32);
            }
            // BUG FIX: TLR is an MMIO register and must be written with
            // `write_volatile` like every other register access in this
            // driver; a plain `write` may be reordered or optimized away,
            // which would silently drop the transmit commit.
            AXI_STREAM_FIFO_TLR.write_volatile((len * 4) as u32);
        }
        res
    }
}
/// The registered network driver object: a smoltcp Ethernet interface
/// plus the underlying FIFO-backed device handle it was built from.
pub struct RouterInterface {
    // smoltcp interface state (neighbor cache, IP addresses, routes).
    iface: Mutex<EthernetInterface<'static, 'static, 'static, RouterDriver>>,
    // Raw device handle; shares the same Router state as `iface`.
    driver: RouterDriver,
}
impl Driver for RouterInterface {
    /// Interrupt handler: acknowledge pending FIFO interrupts, drain one
    /// received frame (if any) into the router buffer, then poll the
    /// smoltcp interface and wake blocked sockets.
    ///
    /// Returns `true` when the interrupt belonged to this device.
    fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
        let mut driver = self.driver.0.lock();

        let isr = unsafe { AXI_STREAM_FIFO_ISR.read_volatile() };
        if isr == 0 {
            return false;
        }
        debug!("handle router interrupt {:b}", isr);
        unsafe {
            // Acknowledge by writing the pending bits back.
            // BUG FIX: ISR is an MMIO register — use `write_volatile`
            // (a plain `write` may be reordered or elided).
            AXI_STREAM_FIFO_ISR.write_volatile(isr);
            let rdfo = AXI_STREAM_FIFO_RDFO.read_volatile();
            if rdfo > 0 {
                // Occupancy is known up front; reserve once.
                let mut buffer = Vec::with_capacity(rdfo as usize);
                // RLR/RDR must be read once per packet to advance the
                // FIFO state; their values are not needed here.
                let _rlr = AXI_STREAM_FIFO_RLR.read_volatile();
                let _rdr = AXI_STREAM_FIFO_RDR.read_volatile();
                for _ in 0..rdfo {
                    buffer.push(AXI_STREAM_FIFO_RDFD.read_volatile() as u8);
                }
                debug!("got packet of length {}", rdfo);
                driver.buffer.push(buffer);
            }
            // Release the device lock before polling: poll() re-enters the
            // RouterDriver through the rx/tx tokens.
            drop(driver);
            let timestamp = Instant::from_millis(crate::trap::uptime_msec() as i64);
            let mut sockets = SOCKETS.lock();
            match self.iface.lock().poll(&mut sockets, timestamp) {
                Ok(_) => {
                    SOCKET_ACTIVITY.notify_all();
                }
                Err(err) => {
                    debug!("poll got err {}", err);
                }
            }
        }
        true
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Net
    }

    fn get_id(&self) -> String {
        String::from("router")
    }

    fn get_mac(&self) -> EthernetAddress {
        unimplemented!()
    }

    fn get_ifname(&self) -> String {
        String::from("router")
    }

    fn ipv4_address(&self) -> Option<Ipv4Address> {
        unimplemented!()
    }

    fn poll(&self) {
        unimplemented!()
    }
}
/// Bring up the rCore router device: reset the hardware FIFOs, build a
/// smoltcp interface on top of them, register it with the global driver
/// lists, and enable the receive-complete interrupt.
///
/// Returns the registered interface (also pushed into `DRIVERS` and
/// `NET_DRIVERS` as a side effect).
pub fn router_init() -> Arc<RouterInterface> {
    // Reset both hardware FIFOs before any other register access.
    unsafe {
        AXI_STREAM_FIFO_TDFR.write_volatile(0xA5); // tx fifo
        AXI_STREAM_FIFO_RDFR.write_volatile(0xA5); // rx fifo
    }

    let mac = EthernetAddress::from_bytes(&[2, 2, 3, 3, 0, 0]);
    let device = RouterDriver(Arc::new(Mutex::new(Router { buffer: Vec::new() })));
    // Two /24 subnets, one per routed port.
    let addresses = [
        IpCidr::new(IpAddress::v4(10, 0, 0, 1), 24),
        IpCidr::new(IpAddress::v4(10, 0, 1, 1), 24),
    ];
    let iface = EthernetInterfaceBuilder::new(device.clone())
        .ethernet_addr(mac)
        .ip_addrs(addresses)
        .neighbor_cache(NeighborCache::new(BTreeMap::new()))
        .routes(Routes::new(BTreeMap::new()))
        .finalize();

    info!("router interface up");

    let driver = Arc::new(RouterInterface {
        iface: Mutex::new(iface),
        driver: device,
    });
    DRIVERS.write().push(driver.clone());
    NET_DRIVERS.write().push(driver.clone());

    // Enable the Receive Complete interrupt (bit 26 of IER).
    unsafe {
        AXI_STREAM_FIFO_IER.write_volatile(1 << 26);
    }

    driver
}

@ -9,7 +9,6 @@ use bitflags::*;
use device_tree::util::SliceRead; use device_tree::util::SliceRead;
use device_tree::Node; use device_tree::Node;
use log::*; use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use smoltcp::phy::{self, DeviceCapabilities}; use smoltcp::phy::{self, DeviceCapabilities};
use smoltcp::time::Instant; use smoltcp::time::Instant;
@ -17,12 +16,12 @@ use smoltcp::wire::{EthernetAddress, Ipv4Address};
use smoltcp::Result; use smoltcp::Result;
use volatile::{ReadOnly, Volatile}; use volatile::{ReadOnly, Volatile};
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR; use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*; use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS}; use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIONet { pub struct VirtIONet {
interrupt_parent: u32, interrupt_parent: u32,
@ -43,9 +42,6 @@ impl Driver for VirtIONetDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool { fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock(); let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) }; let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read(); let interrupt = header.interrupt_status.read();
if interrupt != 0 { if interrupt != 0 {
@ -138,10 +134,6 @@ impl phy::RxToken for VirtIONetRxToken {
{ {
let (input, output, _, user_data) = { let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock(); let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap() driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
}; };
let result = f(&input[0][size_of::<VirtIONetHeader>()..]); let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
@ -159,10 +151,6 @@ impl phy::TxToken for VirtIONetTxToken {
{ {
let output = { let output = {
let mut driver = (self.0).0.lock(); let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() { if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) } unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) }
} else { } else {
@ -252,8 +240,9 @@ struct VirtIONetHeader {
pub fn virtio_net_init(node: &Node) { pub fn virtio_net_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap(); let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap(); let paddr = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) }; let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits()); header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -267,7 +256,8 @@ pub fn virtio_net_init(node: &Node) {
header.write_driver_features(driver_features); header.write_driver_features(driver_features);
// read configuration space // read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) }; let config =
unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let mac = config.mac; let mac = config.mac;
let status = VirtIONetworkStatus::from_bits_truncate(config.status.read()); let status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
debug!("Got MAC address {:?} and status {:?}", mac, status); debug!("Got MAC address {:?} and status {:?}", mac, status);
@ -280,7 +270,7 @@ pub fn virtio_net_init(node: &Node) {
let mut driver = VirtIONet { let mut driver = VirtIONet {
interrupt: node.prop_u32("interrupts").unwrap(), interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(), interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize, header: vaddr as usize,
mac: EthernetAddress(mac), mac: EthernetAddress(mac),
queues: [ queues: [
VirtIOVirtqueue::new(header, VIRTIO_QUEUE_RECEIVE, queue_num), VirtIOVirtqueue::new(header, VIRTIO_QUEUE_RECEIVE, queue_num),

@ -1,25 +1,31 @@
use alloc::alloc::{alloc_zeroed, dealloc, Layout}; use alloc::alloc::{alloc_zeroed, dealloc, Layout};
pub use crate::arch::paging::PageTableImpl;
use crate::consts::PHYSICAL_MEMORY_OFFSET;
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt, virt_to_phys};
use isomorphic_drivers::provider; use isomorphic_drivers::provider;
use rcore_memory::paging::PageTable; use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use crate::memory::active_table;
pub struct Provider; pub struct Provider;
impl provider::Provider for Provider { impl provider::Provider for Provider {
const PAGE_SIZE: usize = PAGE_SIZE; const PAGE_SIZE: usize = PAGE_SIZE;
fn alloc_dma(size: usize) -> (usize, usize) { fn alloc_dma(size: usize) -> (usize, usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap(); // TODO: allocate continuous pages
let vaddr = unsafe { alloc_zeroed(layout) } as usize; let mut paddr = alloc_frame().unwrap();
let paddr = active_table().get_entry(vaddr).unwrap().target(); for i in 1..(size / PAGE_SIZE) {
let paddr_new = alloc_frame().unwrap();
assert_eq!(paddr - PAGE_SIZE, paddr_new);
paddr = paddr_new;
}
let vaddr = phys_to_virt(paddr);
(vaddr, paddr) (vaddr, paddr)
} }
fn dealloc_dma(vaddr: usize, size: usize) { fn dealloc_dma(vaddr: usize, size: usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap(); let paddr = virt_to_phys(vaddr);
unsafe { dealloc(vaddr as *mut u8, layout) } dealloc_frame(paddr);
} }
} }

@ -22,7 +22,7 @@ mod pseudo;
mod stdio; mod stdio;
mod vga; mod vga;
/// Hard link user programs // Hard link user programs
#[cfg(feature = "link_user")] #[cfg(feature = "link_user")]
global_asm!(concat!( global_asm!(concat!(
r#" r#"

@ -45,13 +45,25 @@ impl Pipe {
) )
} }
pub fn can_read(&self) -> bool { fn can_read(&self) -> bool {
if let PipeEnd::Read = self.direction { if let PipeEnd::Read = self.direction {
self.data.lock().buf.len() > 0 self.data.lock().buf.len() > 0 || self.is_broken()
} else { } else {
false false
} }
} }
fn can_write(&self) -> bool {
if let PipeEnd::Write = self.direction {
!self.is_broken()
} else {
false
}
}
fn is_broken(&self) -> bool {
Arc::strong_count(&self.data) < 2
}
} }
// TODO: better way to provide default impl? // TODO: better way to provide default impl?
@ -105,39 +117,11 @@ impl INode for Pipe {
} }
fn poll(&self) -> Result<PollStatus> { fn poll(&self) -> Result<PollStatus> {
let data = self.data.lock(); Ok(PollStatus {
match self.direction { read: self.can_read(),
PipeEnd::Read => { write: self.can_write(),
if data.buf.len() > 0 { error: false,
Ok(PollStatus { })
read: true,
write: false,
error: false,
})
} else {
Ok(PollStatus {
read: false,
write: false,
error: false,
})
}
}
PipeEnd::Write => {
if data.buf.len() > 0 {
Ok(PollStatus {
read: false,
write: true,
error: false,
})
} else {
Ok(PollStatus {
read: false,
write: false,
error: false,
})
}
}
}
} }
impl_inode!(); impl_inode!();
} }

@ -3,6 +3,7 @@ use core::fmt;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use log::{self, Level, LevelFilter, Log, Metadata, Record}; use log::{self, Level, LevelFilter, Log, Metadata, Record};
use crate::processor;
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use crate::util::color::ConsoleColor; use crate::util::color::ConsoleColor;
@ -63,12 +64,17 @@ impl Log for SimpleLogger {
true true
} }
fn log(&self, record: &Record) { fn log(&self, record: &Record) {
static DISABLED_TARGET: &[&str] = &[]; if !self.enabled(record.metadata()) {
if self.enabled(record.metadata()) && !DISABLED_TARGET.contains(&record.target()) { return;
// let target = record.target(); }
// let begin = target.as_bytes().iter().rposition(|&c| c == b':').map(|i| i + 1).unwrap_or(0); if let Some(tid) = processor().tid_option() {
print_in_color(
format_args!("[{:>5}][{}] {}\n", record.level(), tid, record.args()),
ConsoleColor::from(record.level()),
);
} else {
print_in_color( print_in_color(
format_args!("[{:>5}] {}\n", record.level(), record.args()), format_args!("[{:>5}][-] {}\n", record.level(), record.args()),
ConsoleColor::from(record.level()), ConsoleColor::from(record.level()),
); );
} }

@ -14,20 +14,21 @@
use super::HEAP_ALLOCATOR; use super::HEAP_ALLOCATOR;
pub use crate::arch::paging::*; pub use crate::arch::paging::*;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET}; use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET, PHYSICAL_MEMORY_OFFSET};
use crate::process::current_thread; use crate::process::current_thread;
use crate::sync::SpinNoIrqLock; use crate::sync::{MutexGuard, SpinNoIrq, SpinNoIrqLock};
use alloc::boxed::Box; use alloc::boxed::Box;
use bitmap_allocator::BitAlloc; use bitmap_allocator::BitAlloc;
use buddy_system_allocator::Heap; use buddy_system_allocator::Heap;
use core::mem; use core::mem;
use core::mem::size_of;
use lazy_static::*; use lazy_static::*;
use log::*; use log::*;
pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr}; pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr};
use rcore_memory::paging::PageTable; use rcore_memory::paging::PageTable;
use rcore_memory::*; use rcore_memory::*;
pub type MemorySet = rcore_memory::memory_set::MemorySet<InactivePageTable0>; pub type MemorySet = rcore_memory::memory_set::MemorySet<PageTableImpl>;
// x86_64 support up to 64G memory // x86_64 support up to 64G memory
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@ -54,22 +55,14 @@ lazy_static! {
SpinNoIrqLock::new(FrameAlloc::default()); SpinNoIrqLock::new(FrameAlloc::default());
} }
/// The only way to get active page table /// Convert physical address to virtual address
/// pub const fn phys_to_virt(paddr: usize) -> usize {
/// ## CHANGE LOG PHYSICAL_MEMORY_OFFSET + paddr
/// }
/// In the past, this function returns a `MutexGuard` of a global
/// `Mutex<ActiveTable>` object, which means only one CPU core /// Convert virtual address to physical address
/// can access its active table at a time. pub const fn virt_to_phys(vaddr: usize) -> usize {
/// vaddr - PHYSICAL_MEMORY_OFFSET
/// But given that a page table is ** process local **, and being active
/// when and only when a thread of the process is running.
/// The ownership of this page table is in the `MemorySet` object.
/// So it's safe to access the active table inside `MemorySet`.
/// But the shared parts is readonly, e.g. all pages mapped in
/// `InactivePageTable::map_kernel()`.
pub fn active_table() -> ActivePageTable {
unsafe { ActivePageTable::new() }
} }
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
@ -153,13 +146,9 @@ pub fn init_heap() {
pub fn enlarge_heap(heap: &mut Heap) { pub fn enlarge_heap(heap: &mut Heap) {
info!("Enlarging heap to avoid oom"); info!("Enlarging heap to avoid oom");
let mut page_table = active_table();
let mut addrs = [(0, 0); 32]; let mut addrs = [(0, 0); 32];
let mut addr_len = 0; let mut addr_len = 0;
#[cfg(target_arch = "x86_64")] let va_offset = PHYSICAL_MEMORY_OFFSET;
let va_offset = KERNEL_OFFSET + 0xe0000000;
#[cfg(not(target_arch = "x86_64"))]
let va_offset = KERNEL_OFFSET + 0x00e00000;
for i in 0..16384 { for i in 0..16384 {
let page = alloc_frame().unwrap(); let page = alloc_frame().unwrap();
let va = va_offset + page; let va = va_offset + page;
@ -175,12 +164,56 @@ pub fn enlarge_heap(heap: &mut Heap) {
addr_len += 1; addr_len += 1;
} }
for (addr, len) in addrs[..addr_len].into_iter() { for (addr, len) in addrs[..addr_len].into_iter() {
for va in (*addr..(*addr + *len)).step_by(PAGE_SIZE) {
page_table.map(va, va - va_offset).update();
}
info!("Adding {:#X} {:#X} to heap", addr, len); info!("Adding {:#X} {:#X} to heap", addr, len);
unsafe { unsafe {
heap.init(*addr, *len); heap.init(*addr, *len);
} }
} }
} }
pub fn access_ok(from: usize, len: usize) -> bool {
from < PHYSICAL_MEMORY_OFFSET && (from + len) < PHYSICAL_MEMORY_OFFSET
}
#[naked]
pub unsafe extern "C" fn read_user_fixup() -> usize {
return 1;
}
#[no_mangle]
pub fn copy_from_user_u8(addr: *const u8) -> Option<u8> {
#[naked]
#[inline(never)]
#[link_section = ".text.copy_user"]
unsafe extern "C" fn read_user_u8(dst: *mut u8, src: *const u8) -> usize {
dst.copy_from_nonoverlapping(src, 1);
0
}
if !access_ok(addr as usize, size_of::<u8>()) {
return None;
}
let mut dst: u8 = 0;
match unsafe { read_user_u8((&mut dst) as *mut u8, addr) } {
0 => Some(dst),
_ => None,
}
}
#[no_mangle]
pub fn copy_from_user_usize(addr: *const usize) -> Option<usize> {
#[naked]
#[inline(never)]
#[link_section = ".text.copy_user"]
unsafe extern "C" fn read_user_usize(dst: *mut usize, src: *const usize) -> usize {
dst.copy_from_nonoverlapping(src, 1);
0
}
if !access_ok(addr as usize, size_of::<usize>()) {
return None;
}
let mut dst: usize = 0;
match unsafe { read_user_usize((&mut dst) as *mut usize, addr) } {
0 => Some(dst),
_ => None,
}
}

@ -20,6 +20,7 @@ use crate::memory::{
use crate::sync::{Condvar, SpinNoIrqLock as Mutex}; use crate::sync::{Condvar, SpinNoIrqLock as Mutex};
use super::abi::{self, ProcInitInfo}; use super::abi::{self, ProcInitInfo};
use crate::processor;
use core::mem::uninitialized; use core::mem::uninitialized;
use rcore_fs::vfs::INode; use rcore_fs::vfs::INode;
@ -66,7 +67,7 @@ pub struct Process {
// relationship // relationship
pub pid: Pid, // i.e. tgid, usually the tid of first thread pub pid: Pid, // i.e. tgid, usually the tid of first thread
pub parent: Option<Arc<Mutex<Process>>>, pub parent: Weak<Mutex<Process>>,
pub children: Vec<Weak<Mutex<Process>>>, pub children: Vec<Weak<Mutex<Process>>>,
pub threads: Vec<Tid>, // threads in the same process pub threads: Vec<Tid>, // threads in the same process
@ -75,8 +76,8 @@ pub struct Process {
pub child_exit_code: BTreeMap<usize, usize>, // child process store its exit code here pub child_exit_code: BTreeMap<usize, usize>, // child process store its exit code here
} }
/// Records the mapping between pid and Process struct.
lazy_static! { lazy_static! {
/// Records the mapping between pid and Process struct.
pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> = pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> =
RwLock::new(BTreeMap::new()); RwLock::new(BTreeMap::new());
} }
@ -125,7 +126,7 @@ impl Thread {
exec_path: String::new(), exec_path: String::new(),
futexes: BTreeMap::default(), futexes: BTreeMap::default(),
pid: Pid(0), pid: Pid(0),
parent: None, parent: Weak::new(),
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
@ -306,7 +307,7 @@ impl Thread {
exec_path: String::from(exec_path), exec_path: String::from(exec_path),
futexes: BTreeMap::default(), futexes: BTreeMap::default(),
pid: Pid(0), pid: Pid(0),
parent: None, parent: Weak::new(),
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
@ -332,7 +333,7 @@ impl Thread {
exec_path: proc.exec_path.clone(), exec_path: proc.exec_path.clone(),
futexes: BTreeMap::default(), futexes: BTreeMap::default(),
pid: Pid(0), pid: Pid(0),
parent: Some(self.proc.clone()), parent: Arc::downgrade(&self.proc),
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
@ -406,6 +407,20 @@ impl Process {
} }
self.futexes.get(&uaddr).unwrap().clone() self.futexes.get(&uaddr).unwrap().clone()
} }
/// Exit the process.
/// Kill all threads and notify parent with the exit code.
pub fn exit(&mut self, exit_code: usize) {
// quit all threads
for tid in self.threads.iter() {
processor().manager().exit(*tid, 1);
}
// notify parent and fill exit code
if let Some(parent) = self.parent.upgrade() {
let mut parent = parent.lock();
parent.child_exit_code.insert(self.pid.get(), exit_code);
parent.child_exit.notify_one();
}
}
} }
trait ToMemoryAttr { trait ToMemoryAttr {

@ -28,6 +28,7 @@
use super::Condvar; use super::Condvar;
use crate::arch::interrupt; use crate::arch::interrupt;
use crate::processor;
use core::cell::UnsafeCell; use core::cell::UnsafeCell;
use core::fmt; use core::fmt;
use core::ops::{Deref, DerefMut}; use core::ops::{Deref, DerefMut};
@ -35,11 +36,12 @@ use core::sync::atomic::{AtomicBool, Ordering};
pub type SpinLock<T> = Mutex<T, Spin>; pub type SpinLock<T> = Mutex<T, Spin>;
pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>; pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
pub type ThreadLock<T> = Mutex<T, Condvar>; pub type SleepLock<T> = Mutex<T, Condvar>;
pub struct Mutex<T: ?Sized, S: MutexSupport> { pub struct Mutex<T: ?Sized, S: MutexSupport> {
lock: AtomicBool, lock: AtomicBool,
support: S, support: S,
user: UnsafeCell<(usize, usize)>, // (cid, tid)
data: UnsafeCell<T>, data: UnsafeCell<T>,
} }
@ -78,6 +80,7 @@ impl<T, S: MutexSupport> Mutex<T, S> {
lock: AtomicBool::new(false), lock: AtomicBool::new(false),
data: UnsafeCell::new(user_data), data: UnsafeCell::new(user_data),
support: S::new(), support: S::new(),
user: UnsafeCell::new((0, 0)),
} }
} }
@ -93,11 +96,23 @@ impl<T, S: MutexSupport> Mutex<T, S> {
impl<T: ?Sized, S: MutexSupport> Mutex<T, S> { impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
fn obtain_lock(&self) { fn obtain_lock(&self) {
while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false { while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
let mut try_count = 0;
// Wait until the lock looks unlocked before retrying // Wait until the lock looks unlocked before retrying
while self.lock.load(Ordering::Relaxed) { while self.lock.load(Ordering::Relaxed) {
self.support.cpu_relax(); self.support.cpu_relax();
try_count += 1;
if try_count == 0x100000 {
let (cid, tid) = unsafe { *self.user.get() };
error!(
"Mutex: deadlock detected! locked by cpu {} thread {} @ {:?}",
cid, tid, self as *const Self
);
}
} }
} }
let cid = crate::arch::cpu::id();
let tid = processor().tid_option().unwrap_or(0);
unsafe { self.user.get().write((cid, tid)) };
} }
/// Locks the spinlock and returns a guard. /// Locks the spinlock and returns a guard.

@ -3,7 +3,7 @@
//! The code is borrowed from [RustDoc - Dining Philosophers](https://doc.rust-lang.org/1.6.0/book/dining-philosophers.html) //! The code is borrowed from [RustDoc - Dining Philosophers](https://doc.rust-lang.org/1.6.0/book/dining-philosophers.html)
use crate::sync::Condvar; use crate::sync::Condvar;
use crate::sync::ThreadLock as Mutex; use crate::sync::SleepLock as Mutex;
use crate::thread; use crate::thread;
use alloc::vec; use alloc::vec;
use alloc::{sync::Arc, vec::Vec}; use alloc::{sync::Arc, vec::Vec};

@ -141,6 +141,9 @@ impl Syscall<'_> {
return Ok(0); return Ok(0);
} }
// NOTE: To run rustc, uncomment yield_now and comment Condvar.
// Waking up from pipe is unimplemented now.
// thread::yield_now();
Condvar::wait_any(&[&STDIN.pushed, &(*SOCKET_ACTIVITY)]); Condvar::wait_any(&[&STDIN.pushed, &(*SOCKET_ACTIVITY)]);
} }
} }
@ -269,7 +272,7 @@ impl Syscall<'_> {
mode: usize, mode: usize,
) -> SysResult { ) -> SysResult {
let mut proc = self.process(); let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let flags = OpenFlags::from_bits_truncate(flags); let flags = OpenFlags::from_bits_truncate(flags);
info!( info!(
"openat: dir_fd: {}, path: {:?}, flags: {:?}, mode: {:#o}", "openat: dir_fd: {}, path: {:?}, flags: {:?}, mode: {:#o}",
@ -333,7 +336,7 @@ impl Syscall<'_> {
) -> SysResult { ) -> SysResult {
// TODO: check permissions based on uid/gid // TODO: check permissions based on uid/gid
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let flags = AtFlags::from_bits_truncate(flags); let flags = AtFlags::from_bits_truncate(flags);
if !proc.pid.is_init() { if !proc.pid.is_init() {
// we trust pid 0 process // we trust pid 0 process
@ -383,7 +386,7 @@ impl Syscall<'_> {
flags: usize, flags: usize,
) -> SysResult { ) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let stat_ref = unsafe { self.vm().check_write_ptr(stat_ptr)? }; let stat_ref = unsafe { self.vm().check_write_ptr(stat_ptr)? };
let flags = AtFlags::from_bits_truncate(flags); let flags = AtFlags::from_bits_truncate(flags);
info!( info!(
@ -414,7 +417,7 @@ impl Syscall<'_> {
len: usize, len: usize,
) -> SysResult { ) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let slice = unsafe { self.vm().check_write_array(base, len)? }; let slice = unsafe { self.vm().check_write_array(base, len)? };
info!( info!(
"readlinkat: dirfd: {}, path: {:?}, base: {:?}, len: {}", "readlinkat: dirfd: {}, path: {:?}, base: {:?}, len: {}",
@ -460,7 +463,7 @@ impl Syscall<'_> {
pub fn sys_truncate(&mut self, path: *const u8, len: usize) -> SysResult { pub fn sys_truncate(&mut self, path: *const u8, len: usize) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
info!("truncate: path: {:?}, len: {}", path, len); info!("truncate: path: {:?}, len: {}", path, len);
proc.lookup_inode(&path)?.resize(len)?; proc.lookup_inode(&path)?.resize(len)?;
Ok(0) Ok(0)
@ -524,7 +527,7 @@ impl Syscall<'_> {
arg3: usize, arg3: usize,
) -> SysResult { ) -> SysResult {
info!( info!(
"ioctl: fd: {}, request: {:x}, args: {} {} {}", "ioctl: fd: {}, request: {:#x}, args: {:#x} {:#x} {:#x}",
fd, request, arg1, arg2, arg3 fd, request, arg1, arg2, arg3
); );
let mut proc = self.process(); let mut proc = self.process();
@ -534,7 +537,7 @@ impl Syscall<'_> {
pub fn sys_chdir(&mut self, path: *const u8) -> SysResult { pub fn sys_chdir(&mut self, path: *const u8) -> SysResult {
let mut proc = self.process(); let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
if !proc.pid.is_init() { if !proc.pid.is_init() {
// we trust pid 0 process // we trust pid 0 process
info!("chdir: path: {:?}", path); info!("chdir: path: {:?}", path);
@ -587,8 +590,8 @@ impl Syscall<'_> {
newpath: *const u8, newpath: *const u8,
) -> SysResult { ) -> SysResult {
let proc = self.process(); let proc = self.process();
let oldpath = unsafe { self.vm().check_and_clone_cstr(oldpath)? }; let oldpath = unsafe { check_and_clone_cstr(oldpath)? };
let newpath = unsafe { self.vm().check_and_clone_cstr(newpath)? }; let newpath = unsafe { check_and_clone_cstr(newpath)? };
info!( info!(
"renameat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}", "renameat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}",
olddirfd as isize, oldpath, newdirfd as isize, newpath olddirfd as isize, oldpath, newdirfd as isize, newpath
@ -608,7 +611,7 @@ impl Syscall<'_> {
pub fn sys_mkdirat(&mut self, dirfd: usize, path: *const u8, mode: usize) -> SysResult { pub fn sys_mkdirat(&mut self, dirfd: usize, path: *const u8, mode: usize) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
// TODO: check pathname // TODO: check pathname
info!( info!(
"mkdirat: dirfd: {}, path: {:?}, mode: {:#o}", "mkdirat: dirfd: {}, path: {:?}, mode: {:#o}",
@ -626,7 +629,7 @@ impl Syscall<'_> {
pub fn sys_rmdir(&mut self, path: *const u8) -> SysResult { pub fn sys_rmdir(&mut self, path: *const u8) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
info!("rmdir: path: {:?}", path); info!("rmdir: path: {:?}", path);
let (dir_path, file_name) = split_path(&path); let (dir_path, file_name) = split_path(&path);
@ -652,8 +655,8 @@ impl Syscall<'_> {
flags: usize, flags: usize,
) -> SysResult { ) -> SysResult {
let proc = self.process(); let proc = self.process();
let oldpath = unsafe { self.vm().check_and_clone_cstr(oldpath)? }; let oldpath = unsafe { check_and_clone_cstr(oldpath)? };
let newpath = unsafe { self.vm().check_and_clone_cstr(newpath)? }; let newpath = unsafe { check_and_clone_cstr(newpath)? };
let flags = AtFlags::from_bits_truncate(flags); let flags = AtFlags::from_bits_truncate(flags);
info!( info!(
"linkat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}, flags: {:?}", "linkat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}, flags: {:?}",
@ -673,7 +676,7 @@ impl Syscall<'_> {
pub fn sys_unlinkat(&mut self, dirfd: usize, path: *const u8, flags: usize) -> SysResult { pub fn sys_unlinkat(&mut self, dirfd: usize, path: *const u8, flags: usize) -> SysResult {
let proc = self.process(); let proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let flags = AtFlags::from_bits_truncate(flags); let flags = AtFlags::from_bits_truncate(flags);
info!( info!(
"unlinkat: dirfd: {}, path: {:?}, flags: {:?}", "unlinkat: dirfd: {}, path: {:?}, flags: {:?}",
@ -1433,6 +1436,7 @@ impl IoVecs {
} }
#[repr(C)] #[repr(C)]
#[derive(Debug)]
pub struct PollFd { pub struct PollFd {
fd: u32, fd: u32,
events: PollEvents, events: PollEvents,

@ -69,10 +69,9 @@ impl Syscall<'_> {
val, val,
timeout timeout
); );
// if op & OP_PRIVATE == 0 { if op & OP_PRIVATE == 0 {
// unimplemented!("futex only support process-private"); warn!("process-shared futex is unimplemented");
// return Err(SysError::ENOSYS); }
// }
if uaddr % size_of::<u32>() != 0 { if uaddr % size_of::<u32>() != 0 {
return Err(SysError::EINVAL); return Err(SysError::EINVAL);
} }
@ -80,7 +79,7 @@ impl Syscall<'_> {
const OP_WAIT: u32 = 0; const OP_WAIT: u32 = 0;
const OP_WAKE: u32 = 1; const OP_WAKE: u32 = 1;
const OP_PRIVATE: u32 = 128; const OP_PRIVATE: u32 = 0x80;
let mut proc = self.process(); let mut proc = self.process();
let queue = proc.get_futex(uaddr); let queue = proc.get_futex(uaddr);

@ -10,7 +10,7 @@ use rcore_memory::VMError;
use crate::arch::cpu; use crate::arch::cpu;
use crate::arch::interrupt::TrapFrame; use crate::arch::interrupt::TrapFrame;
use crate::arch::syscall::*; use crate::arch::syscall::*;
use crate::memory::MemorySet; use crate::memory::{copy_from_user_u8, copy_from_user_usize, MemorySet};
use crate::process::*; use crate::process::*;
use crate::sync::{Condvar, MutexGuard, SpinNoIrq}; use crate::sync::{Condvar, MutexGuard, SpinNoIrq};
use crate::thread; use crate::thread;
@ -559,3 +559,38 @@ pub fn spin_and_wait<T>(condvars: &[&Condvar], mut action: impl FnMut() -> Optio
Condvar::wait_any(&condvars); Condvar::wait_any(&condvars);
} }
} }
pub fn check_and_clone_cstr(user: *const u8) -> Result<String, SysError> {
let mut buffer = Vec::new();
for i in 0.. {
let addr = unsafe { user.add(i) };
if let Some(data) = copy_from_user_u8(addr) {
if data > 0 {
buffer.push(data);
} else {
break;
}
} else {
return Err(SysError::EFAULT);
}
}
return String::from_utf8(buffer).map_err(|_| SysError::EFAULT);
}
pub fn check_and_clone_cstr_array(user: *const *const u8) -> Result<Vec<String>, SysError> {
let mut buffer = Vec::new();
for i in 0.. {
let addr = unsafe { user.add(i) };
if let Some(str_addr) = copy_from_user_usize(addr as *const usize) {
if str_addr > 0 {
let string = check_and_clone_cstr(str_addr as *const u8)?;
buffer.push(string);
} else {
break;
}
} else {
return Err(SysError::EFAULT);
}
}
return Ok(buffer);
}

@ -54,7 +54,6 @@ impl Syscall<'_> {
let new_thread = self let new_thread = self
.thread .thread
.clone(self.tf, newsp, newtls, child_tid as usize); .clone(self.tf, newsp, newtls, child_tid as usize);
// FIXME: parent pid
let tid = processor().manager().add(new_thread); let tid = processor().manager().add(new_thread);
processor().manager().detach(tid); processor().manager().detach(tid);
info!("clone: {} -> {}", thread::current().id(), tid); info!("clone: {} -> {}", thread::current().id(), tid);
@ -66,7 +65,7 @@ impl Syscall<'_> {
/// Wait for the process exit. /// Wait for the process exit.
/// Return the PID. Store exit code to `wstatus` if it's not null. /// Return the PID. Store exit code to `wstatus` if it's not null.
pub fn sys_wait4(&mut self, pid: isize, wstatus: *mut i32) -> SysResult { pub fn sys_wait4(&mut self, pid: isize, wstatus: *mut i32) -> SysResult {
//info!("wait4: pid: {}, code: {:?}", pid, wstatus); info!("wait4: pid: {}, code: {:?}", pid, wstatus);
let wstatus = if !wstatus.is_null() { let wstatus = if !wstatus.is_null() {
Some(unsafe { self.vm().check_write_ptr(wstatus)? }) Some(unsafe { self.vm().check_write_ptr(wstatus)? })
} else { } else {
@ -75,10 +74,12 @@ impl Syscall<'_> {
#[derive(Debug)] #[derive(Debug)]
enum WaitFor { enum WaitFor {
AnyChild, AnyChild,
AnyChildInGroup,
Pid(usize), Pid(usize),
} }
let target = match pid { let target = match pid {
-1 | 0 => WaitFor::AnyChild, -1 => WaitFor::AnyChild,
0 => WaitFor::AnyChildInGroup,
p if p > 0 => WaitFor::Pid(p as usize), p if p > 0 => WaitFor::Pid(p as usize),
_ => unimplemented!(), _ => unimplemented!(),
}; };
@ -86,7 +87,7 @@ impl Syscall<'_> {
let mut proc = self.process(); let mut proc = self.process();
// check child_exit_code // check child_exit_code
let find = match target { let find = match target {
WaitFor::AnyChild => proc WaitFor::AnyChild | WaitFor::AnyChildInGroup => proc
.child_exit_code .child_exit_code
.iter() .iter()
.next() .next()
@ -102,17 +103,19 @@ impl Syscall<'_> {
return Ok(pid); return Ok(pid);
} }
// if not, check pid // if not, check pid
let children: Vec<_> = proc let invalid = {
.children let children: Vec<_> = proc
.iter() .children
.filter_map(|weak| weak.upgrade())
.collect();
let invalid = match target {
WaitFor::AnyChild => children.len() == 0,
WaitFor::Pid(pid) => children
.iter() .iter()
.find(|p| p.lock().pid.get() == pid) .filter_map(|weak| weak.upgrade())
.is_none(), .collect();
match target {
WaitFor::AnyChild | WaitFor::AnyChildInGroup => children.len() == 0,
WaitFor::Pid(pid) => children
.iter()
.find(|p| p.lock().pid.get() == pid)
.is_none(),
}
}; };
if invalid { if invalid {
return Err(SysError::ECHILD); return Err(SysError::ECHILD);
@ -150,9 +153,9 @@ impl Syscall<'_> {
path, argv, envp path, argv, envp
); );
let mut proc = self.process(); let mut proc = self.process();
let path = unsafe { self.vm().check_and_clone_cstr(path)? }; let path = unsafe { check_and_clone_cstr(path)? };
let args = unsafe { self.vm().check_and_clone_cstr_array(argv)? }; let args = unsafe { check_and_clone_cstr_array(argv)? };
let envs = unsafe { self.vm().check_and_clone_cstr_array(envp)? }; let envs = unsafe { check_and_clone_cstr_array(envp)? };
if args.is_empty() { if args.is_empty() {
error!("exec: args is null"); error!("exec: args is null");
@ -204,7 +207,7 @@ impl Syscall<'_> {
/// Kill the process /// Kill the process
pub fn sys_kill(&mut self, pid: usize, sig: usize) -> SysResult { pub fn sys_kill(&mut self, pid: usize, sig: usize) -> SysResult {
info!( info!(
"kill: {} killed: {} with sig {}", "kill: thread {} kill process {} with signal {}",
thread::current().id(), thread::current().id(),
pid, pid,
sig sig
@ -215,21 +218,8 @@ impl Syscall<'_> {
self.sys_exit_group(sig); self.sys_exit_group(sig);
} else { } else {
if let Some(proc_arc) = PROCESSES.read().get(&pid).and_then(|weak| weak.upgrade()) { if let Some(proc_arc) = PROCESSES.read().get(&pid).and_then(|weak| weak.upgrade()) {
let proc = proc_arc.lock(); let mut proc = proc_arc.lock();
// quit all threads proc.exit(sig);
for tid in proc.threads.iter() {
processor().manager().exit(*tid, sig);
}
// notify parent and fill exit code
// avoid deadlock
let proc_parent = proc.parent.clone();
let pid = proc.pid.get();
drop(proc);
if let Some(parent) = proc_parent {
let mut parent = parent.lock();
parent.child_exit_code.insert(pid, sig);
parent.child_exit.notify_one();
}
Ok(0) Ok(0)
} else { } else {
Err(SysError::EINVAL) Err(SysError::EINVAL)
@ -252,7 +242,7 @@ impl Syscall<'_> {
/// Get the parent process id /// Get the parent process id
pub fn sys_getppid(&mut self) -> SysResult { pub fn sys_getppid(&mut self) -> SysResult {
if let Some(parent) = self.process().parent.as_ref() { if let Some(parent) = self.process().parent.upgrade() {
Ok(parent.lock().pid.get()) Ok(parent.lock().pid.get())
} else { } else {
Ok(0) Ok(0)
@ -266,26 +256,15 @@ impl Syscall<'_> {
let mut proc = self.process(); let mut proc = self.process();
proc.threads.retain(|&id| id != tid); proc.threads.retain(|&id| id != tid);
// for last thread, // for last thread, exit the process
// notify parent and fill exit code if proc.threads.len() == 0 {
// avoid deadlock proc.exit(exit_code);
let exit = proc.threads.len() == 0;
let proc_parent = proc.parent.clone();
let pid = proc.pid.get();
drop(proc);
if exit {
if let Some(parent) = proc_parent {
let mut parent = parent.lock();
parent.child_exit_code.insert(pid, exit_code);
parent.child_exit.notify_one();
}
} }
// perform futex wake 1 // perform futex wake 1
// ref: http://man7.org/linux/man-pages/man2/set_tid_address.2.html // ref: http://man7.org/linux/man-pages/man2/set_tid_address.2.html
// FIXME: do it in all possible ways a thread can exit // FIXME: do it in all possible ways a thread can exit
// it has memory access so we can't move it to Thread::drop? // it has memory access so we can't move it to Thread::drop?
let mut proc = self.process();
let clear_child_tid = self.thread.clear_child_tid as *mut u32; let clear_child_tid = self.thread.clear_child_tid as *mut u32;
if !clear_child_tid.is_null() { if !clear_child_tid.is_null() {
info!("exit: futex {:#?} wake 1", clear_child_tid); info!("exit: futex {:#?} wake 1", clear_child_tid);
@ -304,24 +283,10 @@ impl Syscall<'_> {
/// Exit the current thread group (i.e. process) /// Exit the current thread group (i.e. process)
pub fn sys_exit_group(&mut self, exit_code: usize) -> ! { pub fn sys_exit_group(&mut self, exit_code: usize) -> ! {
let proc = self.process(); let mut proc = self.process();
info!("exit_group: {}, code: {}", proc.pid, exit_code); info!("exit_group: {}, code: {}", proc.pid, exit_code);
// quit all threads proc.exit(exit_code);
for tid in proc.threads.iter() {
processor().manager().exit(*tid, exit_code);
}
// notify parent and fill exit code
// avoid deadlock
let proc_parent = proc.parent.clone();
let pid = proc.pid.get();
drop(proc);
if let Some(parent) = proc_parent {
let mut parent = parent.lock();
parent.child_exit_code.insert(pid, exit_code);
parent.child_exit.notify_one();
}
processor().yield_now(); processor().yield_now();
unreachable!(); unreachable!();

@ -79,7 +79,7 @@ impl Syscall<'_> {
} }
} }
/// should be initialized together // should be initialized together
lazy_static! { lazy_static! {
pub static ref EPOCH_BASE: u64 = crate::arch::timer::read_epoch(); pub static ref EPOCH_BASE: u64 = crate::arch::timer::read_epoch();
pub static ref TICK_BASE: u64 = unsafe { crate::trap::TICK as u64 }; pub static ref TICK_BASE: u64 = unsafe { crate::trap::TICK as u64 };

@ -1 +1 @@
Subproject commit ad822e6d3b626b598874bb52a407e90f549c5ab9 Subproject commit bf02e72b85784af3555c7abe6b985aefc215023e
Loading…
Cancel
Save