reformat code using `cargo fmt`

master
WangRunji 6 years ago
parent fe88f4f77f
commit b836b11792
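Note: everything below is the mechanical output of rustfmt; no behavior changes are intended. For reference (this invocation is an assumption, not part of the commit itself), a typical way to produce and verify such a change in a standard Cargo workspace is:

    cargo fmt --all              # rewrite all crates in the workspace with rustfmt
    cargo fmt --all -- --check   # verify formatting without modifying files (useful in CI)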

@@ -80,8 +80,16 @@ impl<T: BitAlloc> BitAllocCascade16<T> {
         assert!(start <= end);
         assert!(end <= Self::CAP);
         for i in start / T::CAP..=(end - 1) / T::CAP {
-            let begin = if start / T::CAP == i { start % T::CAP } else { 0 };
-            let end = if end / T::CAP == i { end % T::CAP } else { T::CAP };
+            let begin = if start / T::CAP == i {
+                start % T::CAP
+            } else {
+                0
+            };
+            let end = if end / T::CAP == i {
+                end % T::CAP
+            } else {
+                T::CAP
+            };
             f(&mut self.sub[i], begin..end);
             self.bitset.set_bit(i, self.sub[i].any());
         }

@@ -24,7 +24,9 @@ impl Page {
     ** @retval Page the page of the given virtual address
     */
     pub fn of_addr(addr: VirtAddr) -> Self {
-        Page { number: addr / PAGE_SIZE }
+        Page {
+            number: addr / PAGE_SIZE,
+        }
     }
     /*
@@ -44,7 +46,9 @@ impl Page {
 impl Add<usize> for Page {
     type Output = Self;
     fn add(self, rhs: usize) -> Self::Output {
-        Page { number: self.number + rhs }
+        Page {
+            number: self.number + rhs,
+        }
     }
 }

@@ -69,8 +69,7 @@ impl<T: PageTable> CowExt<T> {
     ** @retval none
     */
     pub fn unmap_shared(&mut self, addr: VirtAddr) {
-        let entry = self.page_table.get_entry(addr)
-            .expect("entry not exist");
+        let entry = self.page_table.get_entry(addr).expect("entry not exist");
         let frame = entry.target() / PAGE_SIZE;
         if entry.readonly_shared() {
             self.rc_map.read_decrease(&frame);
@@ -89,7 +88,11 @@ impl<T: PageTable> CowExt<T> {
     ** of beginning of the page
     ** @retval bool whether copy-on-write happens.
     */
-    pub fn page_fault_handler(&mut self, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
+    pub fn page_fault_handler(
+        &mut self,
+        addr: VirtAddr,
+        alloc_frame: impl FnOnce() -> PhysAddr,
+    ) -> bool {
         let entry = self.page_table.get_entry(addr);
         if entry.is_none() {
             return false;
@@ -113,7 +116,8 @@ impl<T: PageTable> CowExt<T> {
         self.unmap_shared(addr);
         self.map(addr, alloc_frame());
-        self.get_page_slice_mut(addr).copy_from_slice(&temp_data[..]);
+        self.get_page_slice_mut(addr)
+            .copy_from_slice(&temp_data[..]);
         true
     }
 }
@@ -222,7 +226,8 @@ pub mod test {
         }
         let mut alloc = FrameAlloc(4);
-        pt.page_table.set_handler(Box::new(move |_, addr: VirtAddr| {
+        pt.page_table
+            .set_handler(Box::new(move |_, addr: VirtAddr| {
             pt0.page_fault_handler(addr, || alloc.alloc());
         }));
@@ -263,8 +268,11 @@ pub mod test {
         pt.write(0x2000, 3);
         assert_eq!(pt.rc_map.read_count(&frame), 0);
         assert_eq!(pt.rc_map.write_count(&frame), 0);
-        assert_eq!(pt.get_entry(0x2000).unwrap().target(), target,
-            "The last write reference should not allocate new frame.");
+        assert_eq!(
+            pt.get_entry(0x2000).unwrap().target(),
+            target,
+            "The last write reference should not allocate new frame."
+        );
         assert_eq!(pt.read(0x1000), 2);
         assert_eq!(pt.read(0x2000), 3);
     }

@@ -6,17 +6,17 @@
 use log::*;
 extern crate alloc;
-pub mod paging;
+mod addr;
 pub mod cow;
-pub mod swap;
 pub mod memory_set;
-mod addr;
 pub mod no_mmu;
+pub mod paging;
+pub mod swap;
 pub use crate::addr::*;
 pub enum VMError {
-    InvalidPtr
+    InvalidPtr,
 }
 pub type VMResult<T> = Result<T, VMError>;

@@ -34,11 +34,11 @@ pub trait FrameAllocator: Debug + Clone + 'static {
     fn dealloc(&self, target: PhysAddr);
 }
-mod linear;
 mod byframe;
 mod delay;
+mod linear;
 //mod swap;
-pub use self::linear::Linear;
 pub use self::byframe::ByFrame;
 pub use self::delay::Delay;
+pub use self::linear::Linear;

@@ -1,7 +1,7 @@
 //! memory set, area
 //! and the inactive page table
-use alloc::{boxed::Box, vec::Vec, string::String};
+use alloc::{boxed::Box, string::String, vec::Vec};
 use core::fmt::{Debug, Error, Formatter};
 use crate::paging::*;
@@ -23,7 +23,7 @@ pub struct MemoryArea {
     name: &'static str,
 }
-unsafe impl Send for MemoryArea { }
+unsafe impl Send for MemoryArea {}
 impl MemoryArea {
     /*
@@ -31,14 +31,20 @@ impl MemoryArea {
     ** @retval &[u8] the slice of the content in the memory area
     */
     pub unsafe fn as_slice(&self) -> &[u8] {
-        ::core::slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
+        ::core::slice::from_raw_parts(
+            self.start_addr as *const u8,
+            self.end_addr - self.start_addr,
+        )
     }
     /*
     ** @brief get mutable slice of the content in the memory area
     ** @retval &mut[u8] the mutable slice of the content in the memory area
     */
     pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
-        ::core::slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
+        ::core::slice::from_raw_parts_mut(
+            self.start_addr as *mut u8,
+            self.end_addr - self.start_addr,
+        )
     }
     /*
     ** @brief test whether a virtual address is in the memory area
@@ -50,8 +56,7 @@ impl MemoryArea {
     }
     /// Check the array is within the readable memory
     fn check_read_array<S>(&self, ptr: *const S, count: usize) -> bool {
-        ptr as usize >= self.start_addr &&
-            unsafe { ptr.add(count) as usize } <= self.end_addr
+        ptr as usize >= self.start_addr && unsafe { ptr.add(count) as usize } <= self.end_addr
     }
     /// Check the array is within the writable memory
     fn check_write_array<S>(&self, ptr: *mut S, count: usize) -> bool {
@@ -206,24 +211,30 @@ impl<T: InactivePageTable> MemorySet<T> {
     }
     /// Check the array is within the readable memory
     pub fn check_read_array<S>(&self, ptr: *const S, count: usize) -> VMResult<()> {
-        self.areas.iter()
+        self.areas
+            .iter()
             .find(|area| area.check_read_array(ptr, count))
-            .map(|_|()).ok_or(VMError::InvalidPtr)
+            .map(|_| ())
+            .ok_or(VMError::InvalidPtr)
     }
     /// Check the array is within the writable memory
     pub fn check_write_array<S>(&self, ptr: *mut S, count: usize) -> VMResult<()> {
-        self.areas.iter()
+        self.areas
+            .iter()
             .find(|area| area.check_write_array(ptr, count))
-            .map(|_|()).ok_or(VMError::InvalidPtr)
+            .map(|_| ())
+            .ok_or(VMError::InvalidPtr)
     }
     /// Check the null-end C string is within the readable memory, and is valid.
     /// If so, clone it to a String.
     ///
     /// Unsafe: the page table must be active.
     pub unsafe fn check_and_clone_cstr(&self, ptr: *const u8) -> VMResult<String> {
-        self.areas.iter()
+        self.areas
+            .iter()
             .filter_map(|area| area.check_and_clone_cstr(ptr))
-            .next().ok_or(VMError::InvalidPtr)
+            .next()
+            .ok_or(VMError::InvalidPtr)
     }
     /// Find a free area with hint address `addr_hint` and length `len`.
     /// Return the start address of found free area.
@@ -239,7 +250,8 @@ impl<T: InactivePageTable> MemorySet<T> {
     }
     /// Test if [`start_addr`, `end_addr`) is a free area
     fn test_free_area(&self, start_addr: usize, end_addr: usize) -> bool {
-        self.areas.iter()
+        self.areas
+            .iter()
             .find(|area| area.is_overlap_with(start_addr, end_addr))
             .is_none()
     }
@@ -248,10 +260,26 @@ impl<T: InactivePageTable> MemorySet<T> {
     ** @param area: MemoryArea the memory area to add
     ** @retval none
     */
-    pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, attr: MemoryAttr, handler: impl MemoryHandler, name: &'static str) {
+    pub fn push(
+        &mut self,
+        start_addr: VirtAddr,
+        end_addr: VirtAddr,
+        attr: MemoryAttr,
+        handler: impl MemoryHandler,
+        name: &'static str,
+    ) {
         assert!(start_addr <= end_addr, "invalid memory area");
-        assert!(self.test_free_area(start_addr, end_addr), "memory area overlap");
-        let area = MemoryArea { start_addr, end_addr, attr, handler: Box::new(handler), name };
+        assert!(
+            self.test_free_area(start_addr, end_addr),
+            "memory area overlap"
+        );
+        let area = MemoryArea {
+            start_addr,
+            end_addr,
+            attr,
+            handler: Box::new(handler),
+            name,
+        };
         self.page_table.edit(|pt| area.map(pt));
         self.areas.push(area);
     }
@@ -288,28 +316,73 @@ impl<T: InactivePageTable> MemorySet<T> {
             let area = self.areas.remove(i);
             self.page_table.edit(|pt| area.unmap(pt));
             i -= 1;
-        } else if self.areas[i].start_addr >= start_addr && self.areas[i].start_addr < end_addr {
+        } else if self.areas[i].start_addr >= start_addr
+            && self.areas[i].start_addr < end_addr
+        {
             // prefix
             let area = self.areas.remove(i);
-            let dead_area = MemoryArea { start_addr: area.start_addr, end_addr, attr: area.attr, handler: area.handler.box_clone(), name: area.name };
+            let dead_area = MemoryArea {
+                start_addr: area.start_addr,
+                end_addr,
+                attr: area.attr,
+                handler: area.handler.box_clone(),
+                name: area.name,
+            };
             self.page_table.edit(|pt| dead_area.unmap(pt));
-            let new_area = MemoryArea { start_addr: end_addr, end_addr: area.end_addr, attr: area.attr, handler: area.handler, name: area.name };
+            let new_area = MemoryArea {
+                start_addr: end_addr,
+                end_addr: area.end_addr,
+                attr: area.attr,
+                handler: area.handler,
+                name: area.name,
+            };
             self.areas.insert(i, new_area);
-        } else if self.areas[i].end_addr <= end_addr && self.areas[i].end_addr > start_addr {
+        } else if self.areas[i].end_addr <= end_addr && self.areas[i].end_addr > start_addr
+        {
             // postfix
             let area = self.areas.remove(i);
-            let dead_area = MemoryArea { start_addr: start_addr, end_addr: area.end_addr, attr: area.attr, handler: area.handler.box_clone(), name: area.name };
+            let dead_area = MemoryArea {
+                start_addr: start_addr,
+                end_addr: area.end_addr,
+                attr: area.attr,
+                handler: area.handler.box_clone(),
+                name: area.name,
+            };
             self.page_table.edit(|pt| dead_area.unmap(pt));
-            let new_area = MemoryArea { start_addr: area.start_addr, end_addr: start_addr, attr: area.attr, handler: area.handler, name: area.name };
+            let new_area = MemoryArea {
+                start_addr: area.start_addr,
+                end_addr: start_addr,
+                attr: area.attr,
+                handler: area.handler,
+                name: area.name,
+            };
             self.areas.insert(i, new_area);
         } else {
             // superset
             let area = self.areas.remove(i);
-            let dead_area = MemoryArea { start_addr: start_addr, end_addr: end_addr, attr: area.attr, handler: area.handler.box_clone(), name: area.name };
+            let dead_area = MemoryArea {
+                start_addr: start_addr,
+                end_addr: end_addr,
+                attr: area.attr,
+                handler: area.handler.box_clone(),
+                name: area.name,
+            };
             self.page_table.edit(|pt| dead_area.unmap(pt));
-            let new_area_left = MemoryArea { start_addr: area.start_addr, end_addr: start_addr, attr: area.attr, handler: area.handler.box_clone(), name: area.name };
+            let new_area_left = MemoryArea {
+                start_addr: area.start_addr,
+                end_addr: start_addr,
+                attr: area.attr,
+                handler: area.handler.box_clone(),
+                name: area.name,
+            };
             self.areas.insert(i, new_area_left);
-            let new_area_right = MemoryArea { start_addr: end_addr, end_addr: area.end_addr, attr: area.attr, handler: area.handler, name: area.name };
+            let new_area_right = MemoryArea {
+                start_addr: end_addr,
+                end_addr: area.end_addr,
+                attr: area.attr,
+                handler: area.handler,
+                name: area.name,
+            };
             self.areas.insert(i + 1, new_area_right);
             i += 1;
         }
@@ -323,7 +396,7 @@ impl<T: InactivePageTable> MemorySet<T> {
     ** @retval impl Iterator<Item=&MemoryArea>
     **         the memory area iterator
     */
-    pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
+    pub fn iter(&self) -> impl Iterator<Item = &MemoryArea> {
         self.areas.iter()
     }
     pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
@@ -356,7 +429,11 @@ impl<T: InactivePageTable> MemorySet<T> {
     ** @retval none
     */
     pub fn clear(&mut self) {
-        let Self { ref mut page_table, ref mut areas, .. } = self;
+        let Self {
+            ref mut page_table,
+            ref mut areas,
+            ..
+        } = self;
         page_table.edit(|pt| {
             for area in areas.iter() {
                 area.unmap(pt);
@@ -382,14 +459,16 @@ impl<T: InactivePageTable> MemorySet<T> {
     ** @brief get the mutable reference for the inactive page table
     ** @retval: &mut T the mutable reference of the inactive page table
     */
-    pub fn get_page_table_mut(&mut self) -> &mut T{
+    pub fn get_page_table_mut(&mut self) -> &mut T {
         &mut self.page_table
     }
     pub fn handle_page_fault(&mut self, addr: VirtAddr) -> bool {
         let area = self.areas.iter().find(|area| area.contains(addr));
         match area {
-            Some(area) => self.page_table.edit(|pt| area.handler.handle_page_fault(pt, addr)),
+            Some(area) => self
+                .page_table
+                .edit(|pt| area.handler.handle_page_fault(pt, addr)),
             None => false,
         }
     }
@@ -419,8 +498,6 @@ impl<T: InactivePageTable> Drop for MemorySet<T> {
 impl<T: InactivePageTable> Debug for MemorySet<T> {
     fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
-        f.debug_list()
-            .entries(self.areas.iter())
-            .finish()
+        f.debug_list().entries(self.areas.iter()).finish()
     }
 }

@@ -1,5 +1,5 @@
+use alloc::alloc::{GlobalAlloc, Layout};
 use alloc::vec::Vec;
-use alloc::alloc::{Layout, GlobalAlloc};
 use core::marker::PhantomData;
 pub trait NoMMUSupport {
@@ -28,8 +28,12 @@ impl<S: NoMMUSupport> MemorySet<S> {
         slice
     }
     // empty impls
-    pub fn with<T>(&self, f: impl FnOnce() -> T) -> T { f() }
-    pub fn token(&self) -> usize { 0 }
+    pub fn with<T>(&self, f: impl FnOnce() -> T) -> T {
+        f()
+    }
+    pub fn token(&self) -> usize {
+        0
+    }
     pub unsafe fn activate(&self) {}
 }
@@ -44,7 +48,11 @@ impl<S: NoMMUSupport> MemoryArea<S> {
     fn new(size: usize) -> Self {
         let layout = Layout::from_size_align(size, 1).unwrap();
         let ptr = unsafe { S::allocator().alloc(layout) } as usize;
-        MemoryArea { ptr, layout, support: PhantomData }
+        MemoryArea {
+            ptr,
+            layout,
+            support: PhantomData,
+        }
     }
     unsafe fn as_buf(&self) -> &'static mut [u8] {
         core::slice::from_raw_parts_mut(self.ptr as *mut u8, self.layout.size())

@@ -8,9 +8,14 @@ pub trait PageTableExt: PageTable {
     // So this should be really high in kernel space when necessary.
     const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
-    fn with_temporary_map<T, D>(&mut self, target: PhysAddr, f: impl FnOnce(&mut Self, &mut D) -> T) -> T {
+    fn with_temporary_map<T, D>(
+        &mut self,
+        target: PhysAddr,
+        f: impl FnOnce(&mut Self, &mut D) -> T,
+    ) -> T {
         self.map(Self::TEMP_PAGE_ADDR, target);
-        let data = unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
+        let data =
+            unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
         let ret = f(self, data);
         self.unmap(Self::TEMP_PAGE_ADDR);
         ret

@@ -3,8 +3,8 @@
 //! An mock implementation for the PageTable.
 //! Used to test page table operation.
-use alloc::boxed::Box;
 use super::*;
+use alloc::boxed::Box;
 const PAGE_COUNT: usize = 16;
 const PAGE_SIZE: usize = 4096;
@@ -31,18 +31,42 @@ pub struct MockEntry {
 impl Entry for MockEntry {
     fn update(&mut self) {}
-    fn accessed(&self) -> bool { self.accessed }
-    fn dirty(&self) -> bool { self.dirty }
-    fn writable(&self) -> bool { self.writable }
-    fn present(&self) -> bool { self.present }
-    fn clear_accessed(&mut self) { self.accessed = false; }
-    fn clear_dirty(&mut self) { self.dirty = false; }
-    fn set_writable(&mut self, value: bool) { self.writable = value; }
-    fn set_present(&mut self, value: bool) { self.present = value; }
-    fn target(&self) -> usize { self.target }
-    fn set_target(&mut self, target: usize) { self.target = target; }
-    fn writable_shared(&self) -> bool { self.writable_shared }
-    fn readonly_shared(&self) -> bool { self.readonly_shared }
+    fn accessed(&self) -> bool {
+        self.accessed
+    }
+    fn dirty(&self) -> bool {
+        self.dirty
+    }
+    fn writable(&self) -> bool {
+        self.writable
+    }
+    fn present(&self) -> bool {
+        self.present
+    }
+    fn clear_accessed(&mut self) {
+        self.accessed = false;
+    }
+    fn clear_dirty(&mut self) {
+        self.dirty = false;
+    }
+    fn set_writable(&mut self, value: bool) {
+        self.writable = value;
+    }
+    fn set_present(&mut self, value: bool) {
+        self.present = value;
+    }
+    fn target(&self) -> usize {
+        self.target
+    }
+    fn set_target(&mut self, target: usize) {
+        self.target = target;
+    }
+    fn writable_shared(&self) -> bool {
+        self.writable_shared
+    }
+    fn readonly_shared(&self) -> bool {
+        self.readonly_shared
+    }
     fn set_shared(&mut self, writable: bool) {
         self.writable_shared = writable;
         self.readonly_shared = !writable;
@@ -51,20 +75,36 @@ impl Entry for MockEntry {
         self.writable_shared = false;
         self.readonly_shared = false;
     }
-    fn swapped(&self) -> bool { self.swapped }
-    fn set_swapped(&mut self, value: bool) { self.swapped = value; }
-    fn user(&self) -> bool { unimplemented!() }
-    fn set_user(&mut self, _value: bool) { unimplemented!() }
-    fn execute(&self) -> bool { unimplemented!() }
-    fn set_execute(&mut self, _value: bool) { unimplemented!() }
-    fn mmio(&self) -> u8 { unimplemented!() }
-    fn set_mmio(&mut self, _value: u8) { unimplemented!() }
+    fn swapped(&self) -> bool {
+        self.swapped
+    }
+    fn set_swapped(&mut self, value: bool) {
+        self.swapped = value;
+    }
+    fn user(&self) -> bool {
+        unimplemented!()
+    }
+    fn set_user(&mut self, _value: bool) {
+        unimplemented!()
+    }
+    fn execute(&self) -> bool {
+        unimplemented!()
+    }
+    fn set_execute(&mut self, _value: bool) {
+        unimplemented!()
+    }
+    fn mmio(&self) -> u8 {
+        unimplemented!()
+    }
+    fn set_mmio(&mut self, _value: u8) {
+        unimplemented!()
+    }
 }
 type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
 impl PageTable for MockPageTable {
     // type Entry = MockEntry;
     fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry {
         let entry = &mut self.entries[addr / PAGE_SIZE];
@@ -82,10 +122,10 @@ impl PageTable for MockPageTable {
     fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry> {
         Some(&mut self.entries[addr / PAGE_SIZE])
     }
-    fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {
+    fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {
         self._read(addr);
         let pa = self.translate(addr) & !(PAGE_SIZE - 1);
-        let data = unsafe{ &mut *(&mut self.data as *mut [u8; PAGE_SIZE * PAGE_COUNT])};
+        let data = unsafe { &mut *(&mut self.data as *mut [u8; PAGE_SIZE * PAGE_COUNT]) };
         &mut data[pa..pa + PAGE_SIZE]
     }
     fn read(&mut self, addr: usize) -> u8 {

@@ -2,18 +2,17 @@
 //!
 //! Implemented for every architecture, used by OS.
-use super::*;
+pub use self::ext::*;
 #[cfg(test)]
 pub use self::mock_page_table::MockPageTable;
-pub use self::ext::*;
-mod ext;
+use super::*;
 #[cfg(test)]
 mod mock_page_table;
+mod ext;
 pub trait PageTable {
     // type Entry: Entry;
     /// Map a page of virual address `addr` to the frame of physics address `target`
     /// Return the page table entry of the mapped virual address

@@ -1,8 +1,7 @@
 //! Implememnt the swap manager with the FIFO page replacement algorithm
-use alloc::collections::VecDeque;
 use super::*;
+use alloc::collections::VecDeque;
 #[derive(Default)]
 pub struct FifoSwapManager {
@@ -13,22 +12,29 @@ impl SwapManager for FifoSwapManager {
     fn tick(&mut self) {}
     fn push(&mut self, frame: Frame) {
-        info!("SwapManager push token: {:x?} vaddr: {:x?}", frame.get_token(), frame.get_virtaddr());
+        info!(
+            "SwapManager push token: {:x?} vaddr: {:x?}",
+            frame.get_token(),
+            frame.get_virtaddr()
+        );
         self.deque.push_back(frame);
     }
     fn remove(&mut self, token: usize, addr: VirtAddr) {
         info!("SwapManager remove token: {:x?} vaddr: {:x?}", token, addr);
-        let id = self.deque.iter()
+        let id = self
+            .deque
+            .iter()
             .position(|ref x| x.get_virtaddr() == addr && x.get_token() == token)
             .expect("address not found");
         self.deque.remove(id);
         //info!("SwapManager remove token finished: {:x?} vaddr: {:x?}", token, addr);
     }
     fn pop<T, S>(&mut self, _: &mut T, _: &mut S) -> Option<Frame>
-        where T: PageTable, S: Swapper
+    where
+        T: PageTable,
+        S: Swapper,
     {
         self.deque.pop_front()
     }

@@ -17,7 +17,7 @@ pub struct MockSwapper {
 impl Swapper for MockSwapper {
     fn swap_out(&mut self, data: &[u8]) -> Result<usize, ()> {
         let id = self.alloc_id();
-        let mut slice: [u8; PAGE_SIZE] = unsafe{ uninitialized() };
+        let mut slice: [u8; PAGE_SIZE] = unsafe { uninitialized() };
         slice.copy_from_slice(data);
         self.map.insert(id, slice);
         Ok(id)
@@ -27,7 +27,7 @@ impl Swapper for MockSwapper {
         if !self.map.contains_key(&token) {
             return Err(());
         }
-        let mut slice: [u8; PAGE_SIZE] = unsafe{ uninitialized() };
+        let mut slice: [u8; PAGE_SIZE] = unsafe { uninitialized() };
         slice.copy_from_slice(data);
         self.map.insert(token, slice);
         Ok(())
@@ -47,7 +47,7 @@ impl MockSwapper {
     ** @retval usize the allocated location id
     */
     fn alloc_id(&self) -> usize {
-        (0 .. 100usize).find(|i| !self.map.contains_key(i)).unwrap()
+        (0..100usize).find(|i| !self.map.contains_key(i)).unwrap()
     }
 }
@@ -64,8 +64,8 @@ mod test {
     #[test]
     fn swap_out_in() {
         let mut swapper = MockSwapper::default();
-        let mut data: [u8; 4096] = unsafe{ uninitialized() };
-        let data1: [u8; 4096] = unsafe{ uninitialized() };
+        let mut data: [u8; 4096] = unsafe { uninitialized() };
+        let data1: [u8; 4096] = unsafe { uninitialized() };
         let token = swapper.swap_out(&data1).unwrap();
         swapper.swap_in(token, &mut data).unwrap();
         assert_data_eq(&data, &data1);
@@ -74,9 +74,9 @@ mod test {
     #[test]
     fn swap_update() {
         let mut swapper = MockSwapper::default();
-        let mut data: [u8; 4096] = unsafe{ uninitialized() };
-        let data1: [u8; 4096] = unsafe{ uninitialized() };
-        let data2: [u8; 4096] = unsafe{ uninitialized() };
+        let mut data: [u8; 4096] = unsafe { uninitialized() };
+        let data1: [u8; 4096] = unsafe { uninitialized() };
+        let data2: [u8; 4096] = unsafe { uninitialized() };
         let token = swapper.swap_out(&data1).unwrap();
         swapper.swap_update(token, &data2).unwrap();
         swapper.swap_in(token, &mut data).unwrap();
@@ -86,7 +86,7 @@ mod test {
     #[test]
     fn invalid_token() {
         let mut swapper = MockSwapper::default();
-        let mut data: [u8; 4096] = unsafe{ uninitialized() };
+        let mut data: [u8; 4096] = unsafe { uninitialized() };
         assert_eq!(swapper.swap_in(0, &mut data), Err(()));
     }
 }

@@ -6,9 +6,9 @@
 //! Invoke page_fault_handler() on the SwapExt to run the swap process
 //! If the method above returns true, a page is swapped in, else do your own things.
-use super::*;
-use super::paging::*;
 use super::addr::Frame;
+use super::paging::*;
+use super::*;
 use core::ops::{Deref, DerefMut};
 //pub use self::fifo::FifoSwapManager;
@@ -52,7 +52,9 @@ pub trait SwapManager {
     ** @retval Option<Frame> the Frame of the victim page, if present
     */
     fn pop<T, S>(&mut self, page_table: &mut T, swapper: &mut S) -> Option<Frame>
-        where T: PageTable, S: Swapper;
+    where
+        T: PageTable,
+        S: Swapper;
 }
 /// Implement swap in & out execution
@@ -107,15 +109,25 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     ** @param pt: *mut T2 the raw pointer for the target page's inactive page table
     ** @param addr: VirtAddr the target page's virtual address
     */
-    pub unsafe fn set_swappable<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr){
-        let Self {ref mut page_table, ref mut swap_manager, ..} = self;
+    pub unsafe fn set_swappable<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr) {
+        let Self {
+            ref mut page_table,
+            ref mut swap_manager,
+            ..
+        } = self;
         let targetpt = &mut *(pt);
         let pttoken = {
-            info!("SET_SWAPPABLE: the target page table token is {:x?}, addr is {:x?}", targetpt.token(), addr);
+            info!(
+                "SET_SWAPPABLE: the target page table token is {:x?}, addr is {:x?}",
+                targetpt.token(),
+                addr
+            );
             targetpt.token()
         };
-        targetpt.with(||{
-            let entry = page_table.get_entry(addr).expect("failed to get page entry when set swappable");
+        targetpt.with(|| {
+            let entry = page_table
+                .get_entry(addr)
+                .expect("failed to get page entry when set swappable");
             if entry.present() {
                 let frame = Frame::new(pt as usize, addr, pttoken);
                 swap_manager.push(frame);
@@ -136,20 +148,33 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     ** @param addr: VirtAddr the target page's virtual address
     ** @param alloc_frame: the function to alloc a free physical frame for once
     */
-    pub unsafe fn remove_from_swappable<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr, alloc_frame: impl FnOnce() -> PhysAddr){
+    pub unsafe fn remove_from_swappable<T2: InactivePageTable>(
+        &mut self,
+        pt: *mut T2,
+        addr: VirtAddr,
+        alloc_frame: impl FnOnce() -> PhysAddr,
+    ) {
         //info!("come into remove_from swappable");
-        let Self {ref mut page_table, ref mut swap_manager, ref mut swapper} = self;
+        let Self {
+            ref mut page_table,
+            ref mut swap_manager,
+            ref mut swapper,
+        } = self;
         let targetpt = &mut *(pt);
         let pttoken = {
-            info!("SET_UNSWAPPABLE: the target page table token is {:x?}, addr is {:x?}", targetpt.token(), addr);
+            info!(
+                "SET_UNSWAPPABLE: the target page table token is {:x?}, addr is {:x?}",
+                targetpt.token(),
+                addr
+            );
             targetpt.token()
         };
         //info!("try to change pagetable");
-        targetpt.with(||{
+        targetpt.with(|| {
             let token = {
                 let entry = page_table.get_entry(addr).unwrap();
                 if !entry.swapped() {
-                    if entry.present(){
+                    if entry.present() {
                         // if the addr isn't indicating a swapped page, panic occured here
                         swap_manager.remove(pttoken, addr);
                     }
@@ -191,7 +216,11 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     pub fn swap_out_any<T2: InactivePageTable>(&mut self) -> Result<PhysAddr, SwapError> {
         info!("COME in to swap_out_any");
         let victim: Option<Frame> = {
-            let Self {ref mut page_table, ref mut swap_manager, ref mut swapper} = self;
+            let Self {
+                ref mut page_table,
+                ref mut swap_manager,
+                ref mut swapper,
+            } = self;
             swap_manager.pop(page_table, swapper)
         };
         info!("swap out page {}", victim.unwrap().get_virtaddr());
@@ -209,14 +238,19 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     **         the error if failed
     */
     fn swap_out<T2: InactivePageTable>(&mut self, frame: &Frame) -> Result<PhysAddr, SwapError> {
-        let Self {ref mut page_table, ref mut swapper, ..} = self;
-        let ret = unsafe{
+        let Self {
+            ref mut page_table,
+            ref mut swapper,
+            ..
+        } = self;
+        let ret = unsafe {
             let pt = &mut *(frame.get_page_table() as *mut T2);
             pt.with(|| {
                 //use core::slice;
                 //let data = unsafe { slice::from_raw_parts_mut((frame.virtaddr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) };
                 let data = page_table.get_page_slice_mut(frame.get_virtaddr());
-                let entry = page_table.get_entry(frame.get_virtaddr())
+                let entry = page_table
+                    .get_entry(frame.get_virtaddr())
                     .ok_or(SwapError::NotMapped)?;
                 if entry.swapped() {
                     return Err(SwapError::AlreadySwapped);
@@ -242,9 +276,16 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     ** @retval Result<()), SwapError>
     **         the execute result, and the error if failed
     */
-    fn swap_in<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr, target: PhysAddr) -> Result<(), SwapError> {
+    fn swap_in<T2: InactivePageTable>(
+        &mut self,
+        pt: *mut T2,
+        addr: VirtAddr,
+        target: PhysAddr,
+    ) -> Result<(), SwapError> {
         info!("come in to swap in");
-        let entry = self.page_table.get_entry(addr)
+        let entry = self
+            .page_table
+            .get_entry(addr)
             .ok_or(SwapError::NotMapped)?;
         if !entry.swapped() {
             return Err(SwapError::NotSwapped);
@@ -255,10 +296,10 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
         entry.set_present(true);
         entry.update();
         let data = self.page_table.get_page_slice_mut(addr);
-        self.swapper.swap_in(token, data).map_err(|_| SwapError::IOError)?;
-        let pttoken = unsafe{
-            (*pt).token()
-        };
+        self.swapper
+            .swap_in(token, data)
+            .map_err(|_| SwapError::IOError)?;
+        let pttoken = unsafe { (*pt).token() };
         let frame = Frame::new(pt as usize, addr, pttoken);
         ;
         self.swap_manager.push(frame);
@@ -276,11 +317,17 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
     ** of beginning of the page
     ** @retval bool whether swap in happens.
     */
-    pub fn page_fault_handler<T2: InactivePageTable>(&mut self, pt: *mut T2, addr: VirtAddr, swapin: bool, alloc_frame: impl FnOnce() -> PhysAddr) -> bool {
+    pub fn page_fault_handler<T2: InactivePageTable>(
+        &mut self,
+        pt: *mut T2,
+        addr: VirtAddr,
+        swapin: bool,
+        alloc_frame: impl FnOnce() -> PhysAddr,
+    ) -> bool {
         // handle page delayed allocating
         {
             info!("try handling delayed frame allocator");
-            let need_alloc ={
+            let need_alloc = {
                 let entry = self.page_table.get_entry(addr).expect("fail to get entry");
                 //info!("got entry!");
                 !entry.present() && !entry.swapped()
@@ -311,7 +358,11 @@ impl<T: PageTable, M: SwapManager, S: Swapper> SwapExt<T, M, S> {
         match self.page_table.get_entry(addr) {
             // infact the get_entry(addr) should not be None here
             None => return false,
-            Some(entry) => if !entry.swapped() { return false; },
+            Some(entry) => {
+                if !entry.swapped() {
+                    return false;
+                }
+            }
         }
         // Allocate a frame, if failed, swap out a page
         let frame = alloc_frame();

@@ -1,9 +1,9 @@
 //! entrance to test the communication in processes with solving five philosophers problem
-mod mutex;
 mod monitor;
+mod mutex;
 fn main() {
     // mutex::main();
     monitor::main();
 }

@@ -1,7 +1,7 @@
 //! solve the five philosophers problem with monitor
+use std::sync::{Arc, Condvar, Mutex};
 use std::thread;
-use std::sync::{Mutex, Condvar, Arc};
 use std::time::Duration;
 struct Philosopher {
@@ -57,7 +57,13 @@ struct Table {
 pub fn main() {
     let table = Arc::new(Table {
         fork_status: Mutex::new(vec![false; 5]),
-        fork_condvar: vec![Condvar::new(), Condvar::new(), Condvar::new(), Condvar::new(), Condvar::new()],
+        fork_condvar: vec![
+            Condvar::new(),
+            Condvar::new(),
+            Condvar::new(),
+            Condvar::new(),
+            Condvar::new(),
+        ],
     });
     let philosophers = vec![
@@ -68,7 +74,9 @@ pub fn main() {
         Philosopher::new("5", 0, 4),
     ];
-    let handles: Vec<_> = philosophers.into_iter().map(|p| {
+    let handles: Vec<_> = philosophers
+        .into_iter()
+        .map(|p| {
             let table = table.clone();
             thread::spawn(move || {
@@ -77,7 +85,8 @@ pub fn main() {
                     p.eat(&table);
                 }
             })
-    }).collect();
+        })
+        .collect();
     for h in handles {
         h.join().unwrap();

@@ -1,7 +1,7 @@
 //! solve the five philosophers problem with mutex
+use std::sync::{Arc, Mutex};
 use std::thread;
-use std::sync::{Mutex, Arc};
 use std::time::Duration;
 struct Philosopher {
@@ -46,7 +46,7 @@ pub fn main() {
             Mutex::new(()),
             Mutex::new(()),
             Mutex::new(()),
-        ]
+        ],
     });
     let philosophers = vec![
@@ -57,7 +57,9 @@ pub fn main() {
         Philosopher::new("5", 0, 4),
     ];
-    let handles: Vec<_> = philosophers.into_iter().map(|p| {
+    let handles: Vec<_> = philosophers
+        .into_iter()
+        .map(|p| {
             let table = table.clone();
             thread::spawn(move || {
@@ -66,7 +68,8 @@ pub fn main() {
                     p.eat(&table);
                 }
             })
-    }).collect();
+        })
+        .collect();
     for h in handles {
         h.join().unwrap();

@@ -1,2 +1 @@
-fn main() {
-}
+fn main() {}

@@ -8,12 +8,12 @@
 extern crate alloc;
-mod thread_pool;
+mod interrupt;
 mod processor;
 pub mod scheduler;
 pub mod std_thread;
+mod thread_pool;
 mod timer;
-mod interrupt;
-pub use crate::thread_pool::*;
 pub use crate::processor::Processor;
+pub use crate::thread_pool::*;

@@ -1,9 +1,9 @@
+use crate::interrupt;
+use crate::thread_pool::*;
 use alloc::boxed::Box;
 use alloc::sync::Arc;
-use log::*;
 use core::cell::UnsafeCell;
-use crate::thread_pool::*;
-use crate::interrupt;
+use log::*;
 /// Thread executor
 ///
@@ -25,7 +25,9 @@ struct ProcessorInner {
 impl Processor {
     pub const fn new() -> Self {
-        Processor { inner: UnsafeCell::new(None) }
+        Processor {
+            inner: UnsafeCell::new(None),
+        }
     }
     pub unsafe fn init(&self, id: usize, context: Box<Context>, manager: Arc<ThreadPool>) {
@@ -38,7 +40,8 @@ impl Processor {
     }
     fn inner(&self) -> &mut ProcessorInner {
-        unsafe { &mut *self.inner.get() }.as_mut()
+        unsafe { &mut *self.inner.get() }
+            .as_mut()
             .expect("Processor is not initialized")
     }
@@ -51,22 +54,30 @@ impl Processor {
     /// via switch back to the scheduler.
     pub fn run(&self) -> ! {
         let inner = self.inner();
-        unsafe { interrupt::disable_and_store(); }
+        unsafe {
+            interrupt::disable_and_store();
+        }
         loop {
             if let Some(proc) = inner.manager.run(inner.id) {
                 trace!("CPU{} begin running thread {}", inner.id, proc.0);
                 inner.proc = Some(proc);
                 unsafe {
-                    inner.loop_context.switch_to(&mut *inner.proc.as_mut().unwrap().1);
+                    inner
+                        .loop_context
+                        .switch_to(&mut *inner.proc.as_mut().unwrap().1);
                 }
                 let (tid, context) = inner.proc.take().unwrap();
                 trace!("CPU{} stop running thread {}", inner.id, tid);
                 inner.manager.stop(tid, context);
             } else {
                 trace!("CPU{} idle", inner.id);
-                unsafe { interrupt::enable_and_wfi(); }
+                unsafe {
+                    interrupt::enable_and_wfi();
+                }
                 // wait for a timer interrupt
-                unsafe { interrupt::disable_and_store(); }
+                unsafe {
+                    interrupt::disable_and_store();
+                }
             }
         }
     }
@@ -79,7 +90,12 @@ impl Processor {
         let inner = self.inner();
         unsafe {
             let flags = interrupt::disable_and_store();
-            inner.proc.as_mut().unwrap().1.switch_to(&mut *inner.loop_context);
+            inner
+                .proc
+                .as_mut()
+                .unwrap()
+                .1
+                .switch_to(&mut *inner.loop_context);
             interrupt::restore(flags);
         }
     }

@@ -36,7 +36,9 @@ impl RRScheduler {
             max_time_slice,
             infos: Vec::default(),
         };
-        RRScheduler { inner: Mutex::new(inner) }
+        RRScheduler {
+            inner: Mutex::new(inner),
+        }
     }
 }
@@ -63,7 +65,7 @@ impl RRSchedulerInner {
                 self.infos[tid].present = false;
                 self._list_remove(tid);
                 Some(tid - 1)
-            },
+            }
         };
         trace!("rr pop {:?}", ret);
         ret

@@ -62,7 +62,9 @@ impl StrideScheduler {
             infos: Vec::default(),
             queue: BinaryHeap::default(),
         };
-        StrideScheduler { inner: Mutex::new(inner) }
+        StrideScheduler {
+            inner: Mutex::new(inner),
+        }
     }
 }

@@ -6,12 +6,12 @@
 //! - `processor`: Get a reference of the current `Processor`
 //! - `new_kernel_context`: Construct a `Context` of the new kernel thread
+use crate::processor::*;
+use crate::thread_pool::*;
 use alloc::boxed::Box;
 use core::marker::PhantomData;
 use core::time::Duration;
 use log::*;
-use crate::processor::*;
-use crate::thread_pool::*;
 #[linkage = "weak"]
 #[no_mangle]
@@ -23,14 +23,15 @@ fn processor() -> &'static Processor {
 #[linkage = "weak"]
 #[no_mangle]
 /// Construct a `Context` of the new kernel thread
-fn new_kernel_context(_entry: extern fn(usize) -> !, _arg: usize) -> Box<Context> {
+fn new_kernel_context(_entry: extern "C" fn(usize) -> !, _arg: usize) -> Box<Context> {
     unimplemented!("thread: Please implement and export `new_kernel_context`")
 }
 /// Gets a handle to the thread that invokes it.
 pub fn current() -> Thread {
-    Thread { tid: processor().tid() }
+    Thread {
+        tid: processor().tid(),
+    }
 }
 /// Puts the current thread to sleep for the specified amount of time.
@@ -50,7 +51,7 @@ pub fn sleep(dur: Duration) {
 /// `F`: Type of the function `f`
 /// `T`: Type of the return value of `f`
 pub fn spawn<F, T>(f: F) -> JoinHandle<T>
-    where
+where
     F: Send + 'static + FnOnce() -> T,
     T: Send + 'static,
 {
@@ -69,7 +70,7 @@ pub fn spawn<F, T>(f: F) -> JoinHandle<T>
 //
 // Note that it has generic parameters, so for every call to spawn,
 // a new kernel_thread_entry is generated, because each closure type F is unique.
-extern fn kernel_thread_entry<F, T>(f: usize) -> !
+extern "C" fn kernel_thread_entry<F, T>(f: usize) -> !
 where
     F: Send + 'static + FnOnce() -> T,
     T: Send + 'static,

@@ -1,9 +1,9 @@
+use crate::scheduler::Scheduler;
+use crate::timer::Timer;
 use alloc::boxed::Box;
 use alloc::vec::Vec;
-use spin::{Mutex, MutexGuard};
 use log::*;
-use crate::scheduler::Scheduler;
-use crate::timer::Timer;
+use spin::{Mutex, MutexGuard};
 struct Thread {
     status: Status,
@@ -105,8 +105,7 @@ impl ThreadPool {
     /// The manager first mark it `Running`,
     /// then take out and return its Context.
     pub(crate) fn run(&self, cpu_id: usize) -> Option<(Tid, Box<Context>)> {
-        self.scheduler.pop(cpu_id)
-            .map(|tid| {
+        self.scheduler.pop(cpu_id).map(|tid| {
             let mut proc_lock = self.threads[tid].lock();
             let mut proc = proc_lock.as_mut().expect("thread not exist");
             proc.status = Status::Running(cpu_id);
@@ -175,7 +174,7 @@ impl ThreadPool {
                 // release the tid
                 *proc_lock = None;
                 Some(code)
-            },
+            }
             _ => None,
         }
     }

@@ -45,7 +45,7 @@ impl<T: PartialEq> Timer<T> {
         let time = self.tick + time_after;
         let event = Event { time, data };
         let mut it = self.timers.iter();
-        let mut i : usize = 0;
+        let mut i: usize = 0;
         loop {
             match it.next() {
                 None => break,

@@ -14,12 +14,9 @@ fn main() {
         "x86_64" => {
             gen_vector_asm().unwrap();
         }
-        "riscv32" => {
-        }
-        "riscv64" => {
-        }
-        "aarch64" => {
-        }
+        "riscv32" => {}
+        "riscv64" => {}
+        "aarch64" => {}
         _ => panic!("Unknown arch {}", arch),
     }
 }

@@ -57,16 +57,10 @@ impl ColorBuffer {
         unsafe {
             match color_depth {
                 ColorDepth16 => ColorBuffer {
-                    buf16: core::slice::from_raw_parts_mut(
-                        base_addr as *mut u16,
-                        size / 2,
-                    ),
+                    buf16: core::slice::from_raw_parts_mut(base_addr as *mut u16, size / 2),
                 },
                 ColorDepth32 => ColorBuffer {
-                    buf32: core::slice::from_raw_parts_mut(
-                        base_addr as *mut u32,
-                        size / 4,
-                    ),
+                    buf32: core::slice::from_raw_parts_mut(base_addr as *mut u32, size / 4),
                 },
             }
         }

@@ -3,12 +3,12 @@
 //! (ref: https://github.com/raspberrypi/firmware/wiki/Mailbox-property-interface)
 use super::fb::FramebufferInfo;
-use bcm2837::mailbox::{Mailbox, MailboxChannel};
-use lazy_static::lazy_static;
+use aarch64::asm;
 use alloc::string::String;
+use bcm2837::mailbox::{Mailbox, MailboxChannel};
 use core::mem;
+use lazy_static::lazy_static;
 use spin::Mutex;
-use aarch64::asm;
 lazy_static! {
     static ref MAILBOX: Mutex<Mailbox> = Mutex::new(Mailbox::new());
@@ -268,7 +268,10 @@ pub fn framebuffer_get_depth() -> PropertyMailboxResult<u32> {
 /// Set virtual offset. Returns `(X, Y)` in pixel.
 /// The response may not be the same as the request so it must be checked.
 /// May be the previous offset or 0 for unsupported.
-pub fn framebuffer_set_virtual_offset(xoffset: u32, yoffset: u32) -> PropertyMailboxResult<(u32, u32)> {
+pub fn framebuffer_set_virtual_offset(
+    xoffset: u32,
+    yoffset: u32,
+) -> PropertyMailboxResult<(u32, u32)> {
     let ret = send_one_tag!(
         RPI_FIRMWARE_FRAMEBUFFER_SET_VIRTUAL_OFFSET,
         [xoffset, yoffset]
@@ -278,7 +281,11 @@ pub fn framebuffer_set_virtual_offset(xoffset: u32, yoffset: u32) -> PropertyMai
 /// Allocate framebuffer on GPU and try to set width/height/depth.
 /// Returns `FramebufferInfo`.
-pub fn framebuffer_alloc(width: u32, height: u32, depth: u32) -> PropertyMailboxResult<FramebufferInfo> {
+pub fn framebuffer_alloc(
+    width: u32,
+    height: u32,
+    depth: u32,
+) -> PropertyMailboxResult<FramebufferInfo> {
     #[repr(C, packed)]
     #[derive(Debug)]
     struct FramebufferAllocTag {

@@ -1,13 +1,13 @@
 //! Raspberry PI 3 Model B/B+
-use once::*;
 use bcm2837::atags::Atags;
+use once::*;
 pub mod fb;
 pub mod irq;
-pub mod timer;
-pub mod serial;
 pub mod mailbox;
+pub mod serial;
+pub mod timer;
 pub const IO_REMAP_BASE: usize = bcm2837::consts::IO_BASE;
 pub const IO_REMAP_END: usize = bcm2837::consts::KERNEL_OFFSET + 0x4000_1000;

@@ -1,8 +1,8 @@
 use bcm2837::mini_uart::{MiniUart, MiniUartInterruptId};
-use lazy_static::lazy_static;
 use core::fmt;
-use spin::Mutex;
+use lazy_static::lazy_static;
 use once::*;
+use spin::Mutex;

 /// Struct to get a global SerialPort interface
 pub struct SerialPort {
File diff suppressed because it is too large

@@ -10,7 +10,7 @@ use spin::Mutex;
 use crate::util::escape_parser::{CharacterAttribute, EscapeParser};
-use super::fb::{ColorDepth::*, FRAME_BUFFER, FramebufferInfo};
+use super::fb::{ColorDepth::*, FramebufferInfo, FRAME_BUFFER};
 use self::color::FramebufferColor;
 use self::fonts::{Font, Font8x16};
@@ -67,10 +67,7 @@ impl<F: Font> ConsoleBuffer<F> {
                 ch.attr.foreground.pack16() as u32,
                 ch.attr.background.pack16() as u32,
             ),
-            ColorDepth32 => (
-                ch.attr.foreground.pack32(),
-                ch.attr.background.pack32(),
-            ),
+            ColorDepth32 => (ch.attr.foreground.pack32(), ch.attr.background.pack32()),
         };
         if ch.attr.reverse {
             core::mem::swap(&mut foreground, &mut background);
@@ -87,7 +84,10 @@ impl<F: Font> ConsoleBuffer<F> {
         };
         for y in 0..F::HEIGHT {
             for x in 0..F::WIDTH {
-                let pixel = if y == underline_y || y == strikethrough_y || F::get(ch.ascii_char, x, y) {
+                let pixel = if y == underline_y
+                    || y == strikethrough_y
+                    || F::get(ch.ascii_char, x, y)
+                {
                     foreground
                 } else {
                     background

@@ -1,11 +1,11 @@
 //! TrapFrame and context definitions for aarch64.
-use spin::Mutex;
-use lazy_static::lazy_static;
-use aarch64::barrier;
 use aarch64::addr::PhysAddr;
-use aarch64::paging::PhysFrame;
 use aarch64::asm::{tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write_asid};
+use aarch64::barrier;
+use aarch64::paging::PhysFrame;
+use lazy_static::lazy_static;
+use spin::Mutex;
 #[repr(C)]
 #[derive(Default, Debug, Copy, Clone)]
@@ -23,7 +23,7 @@ pub struct TrapFrame {
 /// Used to construct the trap frame of a new thread on the kernel stack
 impl TrapFrame {
-    fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, sp: usize) -> Self {
+    fn new_kernel_thread(entry: extern "C" fn(usize) -> !, arg: usize, sp: usize) -> Self {
         use core::mem::zeroed;
         let mut tf: Self = unsafe { zeroed() };
         tf.x0 = arg;
@@ -65,7 +65,7 @@ impl InitStack {
     }
 }
-extern {
+extern "C" {
     fn __trapret();
 }
@@ -78,7 +78,10 @@ struct ContextData {
 impl ContextData {
     fn new() -> Self {
-        ContextData { lr: __trapret as usize, ..ContextData::default() }
+        ContextData {
+            lr: __trapret as usize,
+            ..ContextData::default()
+        }
     }
 }
@@ -99,7 +102,7 @@ impl Context {
     /// Pop all callee-saved registers, then return to the target.
     #[naked]
     #[inline(never)]
-    unsafe extern fn __switch(_self_stack: &mut usize, _target_stack: &mut usize) {
+    unsafe extern "C" fn __switch(_self_stack: &mut usize, _target_stack: &mut usize) {
         asm!(
         "
         mov x10, #-(12 * 8)
@@ -144,17 +147,30 @@ impl Context {
         }
     }
-    pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, ttbr: usize) -> Self {
+    pub unsafe fn new_kernel_thread(
+        entry: extern "C" fn(usize) -> !,
+        arg: usize,
+        kstack_top: usize,
+        ttbr: usize,
+    ) -> Self {
         InitStack {
             context: ContextData::new(),
            tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
-        }.push_at(kstack_top, ttbr)
+        }
+        .push_at(kstack_top, ttbr)
     }
-    pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, _is32: bool, ttbr: usize) -> Self {
+    pub unsafe fn new_user_thread(
+        entry_addr: usize,
+        ustack_top: usize,
+        kstack_top: usize,
+        _is32: bool,
+        ttbr: usize,
+    ) -> Self {
         InitStack {
             context: ContextData::new(),
             tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
-        }.push_at(kstack_top, ttbr)
+        }
+        .push_at(kstack_top, ttbr)
     }
     pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, ttbr: usize) -> Self {
         InitStack {
@@ -164,9 +180,16 @@ impl Context {
                 tf.x0 = 0;
                 tf
             },
-        }.push_at(kstack_top, ttbr)
-    }
-    pub unsafe fn new_clone(tf: &TrapFrame, ustack_top: usize, kstack_top: usize, ttbr: usize, tls: usize) -> Self {
+        }
+        .push_at(kstack_top, ttbr)
+    }
+    pub unsafe fn new_clone(
+        tf: &TrapFrame,
+        ustack_top: usize,
+        kstack_top: usize,
+        ttbr: usize,
+        tls: usize,
+    ) -> Self {
         InitStack {
             context: ContextData::new(),
             tf: {
@ -176,14 +199,14 @@ impl Context {
tf.x0 = 0; tf.x0 = 0;
tf tf
}, },
}.push_at(kstack_top, ttbr) }
.push_at(kstack_top, ttbr)
} }
/// Called at a new user context /// Called at a new user context
/// To get the init TrapFrame in sys_exec /// To get the init TrapFrame in sys_exec
pub unsafe fn get_init_tf(&self) -> TrapFrame { pub unsafe fn get_init_tf(&self) -> TrapFrame {
(*(self.stack_top as *const InitStack)).tf.clone() (*(self.stack_top as *const InitStack)).tf.clone()
} }
} }
const ASID_MASK: u16 = 0xffff; const ASID_MASK: u16 = 0xffff;
@ -199,7 +222,10 @@ struct AsidAllocator(Asid);
impl AsidAllocator { impl AsidAllocator {
fn new() -> Self { fn new() -> Self {
AsidAllocator(Asid { value: 0, generation: 1 }) AsidAllocator(Asid {
value: 0,
generation: 1,
})
} }
fn alloc(&mut self, old_asid: Asid) -> Asid { fn alloc(&mut self, old_asid: Asid) -> Asid {
@ -1,8 +1,8 @@
//! Trap handler //! Trap handler
use crate::arch::board::irq::handle_irq;
use super::context::TrapFrame; use super::context::TrapFrame;
use super::syndrome::{Fault, Syndrome}; use super::syndrome::{Fault, Syndrome};
use crate::arch::board::irq::handle_irq;
use aarch64::regs::*; use aarch64::regs::*;
use log::*; use log::*;
@ -1,7 +1,7 @@
//! Interrupt and exception for aarch64. //! Interrupt and exception for aarch64.
mod handler;
mod context; mod context;
mod handler;
mod syndrome; mod syndrome;
use aarch64::regs::*; use aarch64::regs::*;
@ -1,7 +1,7 @@
//! Input/output for aarch64. //! Input/output for aarch64.
use super::driver::serial::*;
use super::driver::console::CONSOLE; use super::driver::console::CONSOLE;
use super::driver::serial::*;
use core::fmt::{Arguments, Write}; use core::fmt::{Arguments, Write};
pub fn getchar() -> char { pub fn getchar() -> char {
@ -1,8 +1,8 @@
//! Memory initialization for aarch64. //! Memory initialization for aarch64.
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use crate::consts::{MEMORY_OFFSET, KERNEL_OFFSET};
use super::paging::MMIOType; use super::paging::MMIOType;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use aarch64::regs::*; use aarch64::regs::*;
use log::*; use log::*;
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
@ -19,7 +19,9 @@ fn init_frame_allocator() {
use bit_allocator::BitAlloc; use bit_allocator::BitAlloc;
use core::ops::Range; use core::ops::Range;
let end = super::board::probe_memory().expect("failed to find memory map").1; let end = super::board::probe_memory()
.expect("failed to find memory map")
.1;
let start = (_end as u64 + PAGE_SIZE as u64).wrapping_sub(KERNEL_OFFSET as u64) as usize; let start = (_end as u64 + PAGE_SIZE as u64).wrapping_sub(KERNEL_OFFSET as u64) as usize;
let mut ba = FRAME_ALLOCATOR.lock(); let mut ba = FRAME_ALLOCATOR.lock();
ba.insert(to_range(start, end)); ba.insert(to_range(start, end));
@ -39,14 +41,50 @@ static mut KERNEL_MEMORY_SET: Option<MemorySet> = None;
fn remap_the_kernel() { fn remap_the_kernel() {
let offset = -(KERNEL_OFFSET as isize); let offset = -(KERNEL_OFFSET as isize);
let mut ms = MemorySet::new_bare(); let mut ms = MemorySet::new_bare();
ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(offset), "text"); ms.push(
ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(offset), "data"); stext as usize,
ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(offset), "rodata"); etext as usize,
ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(offset), "bss"); MemoryAttr::default().execute().readonly(),
ms.push(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), Linear::new(offset), "kstack"); Linear::new(offset),
"text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"kstack",
);
use super::board::{IO_REMAP_BASE, IO_REMAP_END}; use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(MMIOType::Device as u8), Linear::new(offset), "io_remap"); ms.push(
IO_REMAP_BASE,
IO_REMAP_END,
MemoryAttr::default().mmio(MMIOType::Device as u8),
Linear::new(offset),
"io_remap",
);
info!("{:#x?}", ms); info!("{:#x?}", ms);
unsafe { ms.get_page_table_mut().activate_as_kernel() } unsafe { ms.get_page_table_mut().activate_as_kernel() }
@ -58,7 +96,13 @@ pub fn ioremap(paddr: usize, len: usize, name: &'static str) -> usize {
let offset = -(KERNEL_OFFSET as isize); let offset = -(KERNEL_OFFSET as isize);
let vaddr = paddr.wrapping_add(KERNEL_OFFSET); let vaddr = paddr.wrapping_add(KERNEL_OFFSET);
if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } { if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
ms.push(vaddr, vaddr + len, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8), Linear::new(offset), name); ms.push(
vaddr,
vaddr + len,
MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8),
Linear::new(offset),
name,
);
return vaddr; return vaddr;
} }
0 0
@ -1,15 +1,15 @@
//! Entrance and initialization for aarch64. //! Entrance and initialization for aarch64.
pub mod io;
pub mod paging;
pub mod memory;
pub mod interrupt;
pub mod consts; pub mod consts;
pub mod cpu; pub mod cpu;
pub mod driver; pub mod driver;
pub mod timer; pub mod interrupt;
pub mod syscall; pub mod io;
pub mod memory;
pub mod paging;
pub mod rand; pub mod rand;
pub mod syscall;
pub mod timer;
#[cfg(feature = "board_raspi3")] #[cfg(feature = "board_raspi3")]
#[path = "board/raspi3/mod.rs"] #[path = "board/raspi3/mod.rs"]
@ -1,11 +1,13 @@
//! Page table implementations for aarch64. //! Page table implementations for aarch64.
use rcore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write}; use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
use aarch64::paging::{Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB};
use aarch64::paging::memory_attribute::*; use aarch64::paging::memory_attribute::*;
use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame, Size4KiB};
use aarch64::paging::{
Mapper, PageTable as Aarch64PageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable,
};
use aarch64::{PhysAddr, VirtAddr};
use log::*; use log::*;
use rcore_memory::paging::*;
// Depends on kernel // Depends on kernel
use crate::consts::{KERNEL_OFFSET, KERNEL_PML4, RECURSIVE_INDEX}; use crate::consts::{KERNEL_OFFSET, KERNEL_PML4, RECURSIVE_INDEX};
use crate::memory::{active_table, alloc_frame, dealloc_frame}; use crate::memory::{active_table, alloc_frame, dealloc_frame};
@ -18,8 +20,16 @@ impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default(); let flags = EF::default();
let attr = MairNormal::attr_value(); let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr as u64), Frame::of_addr(target as u64), flags, attr, &mut FrameAllocatorForAarch64) self.0
.unwrap().flush(); .map_to(
Page::of_addr(addr as u64),
Frame::of_addr(target as u64),
flags,
attr,
&mut FrameAllocatorForAarch64,
)
.unwrap()
.flush();
self.get_entry(addr).expect("fail to get entry") self.get_entry(addr).expect("fail to get entry")
} }
@ -30,7 +40,8 @@ impl PageTable for ActivePageTable {
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
// get p1 entry // get p1 entry
let entry_addr = ((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39) | (vaddr & KERNEL_OFFSET); let entry_addr =
((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39) | (vaddr & KERNEL_OFFSET);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) }) Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
} }
} }
@ -39,12 +50,12 @@ impl PageTableExt for ActivePageTable {
const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000; const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000;
const ROOT_PAGE_TABLE: *mut Aarch64PageTable = const ROOT_PAGE_TABLE: *mut Aarch64PageTable = (KERNEL_OFFSET
(KERNEL_OFFSET | | (RECURSIVE_INDEX << 39)
(RECURSIVE_INDEX << 39) | | (RECURSIVE_INDEX << 30)
(RECURSIVE_INDEX << 30) | | (RECURSIVE_INDEX << 21)
(RECURSIVE_INDEX << 21) | | (RECURSIVE_INDEX << 12))
(RECURSIVE_INDEX << 12)) as *mut Aarch64PageTable; as *mut Aarch64PageTable;
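For readers unfamiliar with the recursive-mapping trick used here and in get_entry() above: placing RECURSIVE_INDEX in every 9-bit index field makes the hardware walk land on the root table itself. A minimal sketch restating the constant above as a function (illustration only, not code from this commit):

    // Illustration only: reproduce ROOT_PAGE_TABLE from its components.
    // With a 4-level, 4 KiB-granule layout, the MMU consumes 9 bits of the
    // virtual address per level; choosing the recursive slot at every level
    // makes the final "page" be the root table itself.
    fn recursive_root(kernel_offset: usize, recursive_index: usize) -> usize {
        kernel_offset
            | (recursive_index << 39) // level-0 index
            | (recursive_index << 30) // level-1 index
            | (recursive_index << 21) // level-2 index
            | (recursive_index << 12) // level-3 index
    }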
impl ActivePageTable { impl ActivePageTable {
pub unsafe fn new() -> Self { pub unsafe fn new() -> Self {
@ -66,38 +77,63 @@ impl Entry for PageEntry {
tlb_invalidate(addr); tlb_invalidate(addr);
} }
fn present(&self) -> bool { self.0.flags().contains(EF::VALID) } fn present(&self) -> bool {
fn accessed(&self) -> bool { self.0.flags().contains(EF::AF) } self.0.flags().contains(EF::VALID)
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITE) } }
fn dirty(&self) -> bool { self.hw_dirty() && self.sw_dirty() } fn accessed(&self) -> bool {
self.0.flags().contains(EF::AF)
}
fn writable(&self) -> bool {
self.0.flags().contains(EF::WRITE)
}
fn dirty(&self) -> bool {
self.hw_dirty() && self.sw_dirty()
}
fn clear_accessed(&mut self) { self.as_flags().remove(EF::AF); } fn clear_accessed(&mut self) {
fn clear_dirty(&mut self) self.as_flags().remove(EF::AF);
{ }
fn clear_dirty(&mut self) {
self.as_flags().remove(EF::DIRTY); self.as_flags().remove(EF::DIRTY);
self.as_flags().insert(EF::AP_RO); self.as_flags().insert(EF::AP_RO);
} }
fn set_writable(&mut self, value: bool) fn set_writable(&mut self, value: bool) {
{
self.as_flags().set(EF::AP_RO, !value); self.as_flags().set(EF::AP_RO, !value);
self.as_flags().set(EF::WRITE, value); self.as_flags().set(EF::WRITE, value);
} }
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID, value); } fn set_present(&mut self, value: bool) {
fn target(&self) -> usize { self.0.addr().as_u64() as usize } self.as_flags().set(EF::VALID, value);
}
fn target(&self) -> usize {
self.0.addr().as_u64() as usize
}
fn set_target(&mut self, target: usize) { fn set_target(&mut self, target: usize) {
self.0.modify_addr(PhysAddr::new(target as u64)); self.0.modify_addr(PhysAddr::new(target as u64));
} }
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::WRITABLE_SHARED) } fn writable_shared(&self) -> bool {
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::READONLY_SHARED) } self.0.flags().contains(EF::WRITABLE_SHARED)
}
fn readonly_shared(&self) -> bool {
self.0.flags().contains(EF::READONLY_SHARED)
}
fn set_shared(&mut self, writable: bool) { fn set_shared(&mut self, writable: bool) {
let flags = self.as_flags(); let flags = self.as_flags();
flags.set(EF::WRITABLE_SHARED, writable); flags.set(EF::WRITABLE_SHARED, writable);
flags.set(EF::READONLY_SHARED, !writable); flags.set(EF::READONLY_SHARED, !writable);
} }
fn clear_shared(&mut self) { self.as_flags().remove(EF::WRITABLE_SHARED | EF::READONLY_SHARED); } fn clear_shared(&mut self) {
fn user(&self) -> bool { self.0.flags().contains(EF::AP_EL0) } self.as_flags()
fn swapped(&self) -> bool { self.0.flags().contains(EF::SWAPPED) } .remove(EF::WRITABLE_SHARED | EF::READONLY_SHARED);
fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::SWAPPED, value); } }
fn user(&self) -> bool {
self.0.flags().contains(EF::AP_EL0)
}
fn swapped(&self) -> bool {
self.0.flags().contains(EF::SWAPPED)
}
fn set_swapped(&mut self, value: bool) {
self.as_flags().set(EF::SWAPPED, value);
}
fn set_user(&mut self, value: bool) { fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::AP_EL0, value); self.as_flags().set(EF::AP_EL0, value);
self.as_flags().set(EF::nG, value); // set non-global to use ASID self.as_flags().set(EF::nG, value); // set non-global to use ASID
@ -140,9 +176,15 @@ impl Entry for PageEntry {
} }
impl PageEntry { impl PageEntry {
fn read_only(&self) -> bool { self.0.flags().contains(EF::AP_RO) } fn read_only(&self) -> bool {
fn hw_dirty(&self) -> bool { self.writable() && !self.read_only() } self.0.flags().contains(EF::AP_RO)
fn sw_dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) } }
fn hw_dirty(&self) -> bool {
self.writable() && !self.read_only()
}
fn sw_dirty(&self) -> bool {
self.0.flags().contains(EF::DIRTY)
}
fn as_flags(&mut self) -> &mut EF { fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) } unsafe { &mut *(self as *mut _ as *mut EF) }
} }
@ -168,7 +210,11 @@ impl InactivePageTable for InactivePageTable0 {
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| { active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero(); table.zero();
// set up recursive mapping for the table // set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value()); table[RECURSIVE_INDEX].set_frame(
frame.clone(),
EF::default(),
MairNormal::attr_value(),
);
}); });
InactivePageTable0 { p4_frame: frame } InactivePageTable0 { p4_frame: frame }
} }
@ -179,7 +225,11 @@ impl InactivePageTable for InactivePageTable0 {
assert!(!e0.is_unused()); assert!(!e0.is_unused());
self.edit(|_| { self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value()); table[KERNEL_PML4].set_frame(
Frame::containing_address(e0.addr()),
EF::default(),
MairNormal::attr_value(),
);
}); });
} }
@ -201,12 +251,18 @@ impl InactivePageTable for InactivePageTable0 {
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T { fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = ttbr_el1_read(1).start_address().as_u64() as usize; let target = ttbr_el1_read(1).start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut Aarch64PageTable| { active_table().with_temporary_map(
target,
|active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone(); let backup = p4_table[RECURSIVE_INDEX].clone();
let old_frame = ttbr_el1_read(0); let old_frame = ttbr_el1_read(0);
// overwrite recursive mapping // overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default(), MairNormal::attr_value()); p4_table[RECURSIVE_INDEX].set_frame(
self.p4_frame.clone(),
EF::default(),
MairNormal::attr_value(),
);
ttbr_el1_write(0, self.p4_frame.clone()); ttbr_el1_write(0, self.p4_frame.clone());
tlb_invalidate_all(); tlb_invalidate_all();
@ -218,7 +274,8 @@ impl InactivePageTable for InactivePageTable0 {
ttbr_el1_write(0, old_frame); ttbr_el1_write(0, old_frame);
tlb_invalidate_all(); tlb_invalidate_all();
ret ret
}) },
)
} }
} }
@ -10,7 +10,8 @@ pub unsafe fn init_external_interrupt() {
/// Claim and complete external interrupt by reading and writing to /// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register. /// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() { pub unsafe fn handle_external_interrupt() {
const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = (KERNEL_OFFSET + 0x0C20_2004) as *mut u32; const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 =
(KERNEL_OFFSET + 0x0C20_2004) as *mut u32;
// claim // claim
let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read(); let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read();
// complete // complete
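The claim/complete protocol hinted at by the comments above works in one register: reading it returns (and claims) the highest-priority pending IRQ, writing the same value back completes it. A hedged sketch of the full sequence (illustration only; `dispatch` is a placeholder, not a function from this codebase):

    unsafe fn plic_handle(claim_complete: *mut u32, dispatch: impl Fn(u32)) {
        // claim: returns 0 if nothing is pending
        let irq = claim_complete.read_volatile();
        if irq != 0 {
            dispatch(irq);                      // hand off to the device driver
            claim_complete.write_volatile(irq); // complete: re-enable delivery of this source
        }
    }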
@ -3,6 +3,6 @@
//! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic) //! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic)
#[no_mangle] #[no_mangle]
pub extern fn abort() { pub extern "C" fn abort() {
panic!("abort"); panic!("abort");
} }
@ -1,8 +1,4 @@
use riscv::register::{ use riscv::register::{scause::Scause, sstatus, sstatus::Sstatus};
sstatus,
sstatus::Sstatus,
scause::Scause,
};
/// Saved registers on a trap. /// Saved registers on a trap.
#[derive(Clone)] #[derive(Clone)]
@ -27,7 +23,7 @@ impl TrapFrame {
/// ///
/// The new thread starts at function `entry` with an usize argument `arg`. /// The new thread starts at function `entry` with an usize argument `arg`.
/// The stack pointer will be set to `sp`. /// The stack pointer will be set to `sp`.
fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, sp: usize) -> Self { fn new_kernel_thread(entry: extern "C" fn(usize) -> !, arg: usize, sp: usize) -> Self {
use core::mem::zeroed; use core::mem::zeroed;
let mut tf: Self = unsafe { zeroed() }; let mut tf: Self = unsafe { zeroed() };
tf.x[10] = arg; // a0 tf.x[10] = arg; // a0
@ -57,17 +53,17 @@ impl TrapFrame {
} }
} }
use core::fmt::{Debug, Formatter, Error}; use core::fmt::{Debug, Error, Formatter};
impl Debug for TrapFrame { impl Debug for TrapFrame {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
struct Regs<'a>(&'a [usize; 32]); struct Regs<'a>(&'a [usize; 32]);
impl<'a> Debug for Regs<'a> { impl<'a> Debug for Regs<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
const REG_NAME: [&str; 32] = [ const REG_NAME: [&str; 32] = [
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "s0", "s1", "a0", "a1", "a2",
"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9",
"s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s10", "s11", "t3", "t4", "t5", "t6",
"t3", "t4", "t5", "t6"]; ];
f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish() f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish()
} }
} }
@ -98,7 +94,7 @@ impl InitStack {
} }
} }
extern { extern "C" {
fn trap_return(); fn trap_return();
} }
@ -116,7 +112,11 @@ struct ContextData {
impl ContextData { impl ContextData {
fn new(satp: usize) -> Self { fn new(satp: usize) -> Self {
ContextData { ra: trap_return as usize, satp, ..ContextData::default() } ContextData {
ra: trap_return as usize,
satp,
..ContextData::default()
}
} }
} }
@ -137,25 +137,29 @@ impl Context {
/// Pop all callee-saved registers, then return to the target. /// Pop all callee-saved registers, then return to the target.
#[naked] #[naked]
#[inline(never)] #[inline(never)]
pub unsafe extern fn switch(&mut self, _target: &mut Self) { pub unsafe extern "C" fn switch(&mut self, _target: &mut Self) {
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
asm!(r" asm!(
r"
.equ XLENB, 4 .equ XLENB, 4
.macro Load reg, mem .macro Load reg, mem
lw \reg, \mem lw \reg, \mem
.endm .endm
.macro Store reg, mem .macro Store reg, mem
sw \reg, \mem sw \reg, \mem
.endm"); .endm"
);
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
asm!(r" asm!(
r"
.equ XLENB, 8 .equ XLENB, 8
.macro Load reg, mem .macro Load reg, mem
ld \reg, \mem ld \reg, \mem
.endm .endm
.macro Store reg, mem .macro Store reg, mem
sd \reg, \mem sd \reg, \mem
.endm"); .endm"
);
asm!(" asm!("
// save from's registers // save from's registers
addi sp, sp, (-XLENB*14) addi sp, sp, (-XLENB*14)
@ -210,11 +214,17 @@ impl Context {
/// The new thread starts at function `entry` with an usize argument `arg`. /// The new thread starts at function `entry` with an usize argument `arg`.
/// The stack pointer will be set to `kstack_top`. /// The stack pointer will be set to `kstack_top`.
/// The SATP register will be set to `satp`. /// The SATP register will be set to `satp`.
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, satp: usize) -> Self { pub unsafe fn new_kernel_thread(
entry: extern "C" fn(usize) -> !,
arg: usize,
kstack_top: usize,
satp: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(satp), context: ContextData::new(satp),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top), tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
/// Constructs Context for a new user thread. /// Constructs Context for a new user thread.
@ -222,11 +232,18 @@ impl Context {
/// The new thread starts at `entry_addr`. /// The new thread starts at `entry_addr`.
/// The stack pointer of user and kernel mode will be set to `ustack_top`, `kstack_top`. /// The stack pointer of user and kernel mode will be set to `ustack_top`, `kstack_top`.
/// The SATP register will be set to `satp`. /// The SATP register will be set to `satp`.
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, _is32: bool, satp: usize) -> Self { pub unsafe fn new_user_thread(
entry_addr: usize,
ustack_top: usize,
kstack_top: usize,
_is32: bool,
satp: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(satp), context: ContextData::new(satp),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top), tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
/// Fork a user process and get the new Context. /// Fork a user process and get the new Context.
@ -243,7 +260,8 @@ impl Context {
tf.x[10] = 0; // a0 tf.x[10] = 0; // a0
tf tf
}, },
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
/// Fork a user thread and get the new Context. /// Fork a user thread and get the new Context.
@ -253,7 +271,13 @@ impl Context {
/// The new user stack will be set to `ustack_top`. /// The new user stack will be set to `ustack_top`.
/// The new thread pointer will be set to `tls`. /// The new thread pointer will be set to `tls`.
/// All the other registers are same as the original. /// All the other registers are same as the original.
pub unsafe fn new_clone(tf: &TrapFrame, ustack_top: usize, kstack_top: usize, satp: usize, tls: usize) -> Self { pub unsafe fn new_clone(
tf: &TrapFrame,
ustack_top: usize,
kstack_top: usize,
satp: usize,
tls: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(satp), context: ContextData::new(satp),
tf: { tf: {
@ -263,7 +287,8 @@ impl Context {
tf.x[10] = 0; // a0 tf.x[10] = 0; // a0
tf tf
}, },
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
/// Used for getting the init TrapFrame of a new user context in `sys_exec`. /// Used for getting the init TrapFrame of a new user context in `sys_exec`.
@ -9,7 +9,9 @@ pub unsafe fn set_cpu_id(cpu_id: usize) {
pub fn id() -> usize { pub fn id() -> usize {
let cpu_id; let cpu_id;
unsafe { asm!("mv $0, gp" : "=r"(cpu_id)); } unsafe {
asm!("mv $0, gp" : "=r"(cpu_id));
}
cpu_id cpu_id
} }
@ -1,14 +1,14 @@
use riscv::register::*;
use crate::drivers::DRIVERS;
pub use self::context::*; pub use self::context::*;
use crate::drivers::DRIVERS;
use log::*; use log::*;
use riscv::register::*;
#[path = "context.rs"] #[path = "context.rs"]
mod context; mod context;
/// Initialize interrupt /// Initialize interrupt
pub fn init() { pub fn init() {
extern { extern "C" {
fn trap_entry(); fn trap_entry();
} }
unsafe { unsafe {
@ -53,9 +53,13 @@ pub unsafe fn restore(flags: usize) {
/// ///
/// This function is called from `trap.asm`. /// This function is called from `trap.asm`.
#[no_mangle] #[no_mangle]
pub extern fn rust_trap(tf: &mut TrapFrame) { pub extern "C" fn rust_trap(tf: &mut TrapFrame) {
use self::scause::{Trap, Interrupt as I, Exception as E}; use self::scause::{Exception as E, Interrupt as I, Trap};
trace!("Interrupt @ CPU{}: {:?} ", super::cpu::id(), tf.scause.cause()); trace!(
"Interrupt @ CPU{}: {:?} ",
super::cpu::id(),
tf.scause.cause()
);
match tf.scause.cause() { match tf.scause.cause() {
Trap::Interrupt(I::SupervisorExternal) => external(), Trap::Interrupt(I::SupervisorExternal) => external(),
Trap::Interrupt(I::SupervisorSoft) => ipi(), Trap::Interrupt(I::SupervisorSoft) => ipi(),
@ -71,13 +75,15 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
fn external() { fn external() {
#[cfg(feature = "board_u540")] #[cfg(feature = "board_u540")]
unsafe { super::board::handle_external_interrupt(); } unsafe {
super::board::handle_external_interrupt();
}
// true means handled, false otherwise // true means handled, false otherwise
let handlers = [try_process_serial, try_process_drivers]; let handlers = [try_process_serial, try_process_drivers];
for handler in handlers.iter() { for handler in handlers.iter() {
if handler() == true { if handler() == true {
break break;
} }
} }
} }
@ -88,17 +94,17 @@ fn try_process_serial() -> bool {
crate::trap::serial(ch); crate::trap::serial(ch);
true true
} }
None => false None => false,
} }
} }
fn try_process_drivers() -> bool { fn try_process_drivers() -> bool {
for driver in DRIVERS.read().iter() { for driver in DRIVERS.read().iter() {
if driver.try_handle_interrupt(None) == true { if driver.try_handle_interrupt(None) == true {
return true return true;
} }
} }
return false return false;
} }
fn ipi() { fn ipi() {
@ -113,7 +119,11 @@ fn timer() {
fn syscall(tf: &mut TrapFrame) { fn syscall(tf: &mut TrapFrame) {
tf.sepc += 4; // Must before syscall, because of fork. tf.sepc += 4; // Must before syscall, because of fork.
let ret = crate::syscall::syscall(tf.x[17], [tf.x[10], tf.x[11], tf.x[12], tf.x[13], tf.x[14], tf.x[15]], tf); let ret = crate::syscall::syscall(
tf.x[17],
[tf.x[10], tf.x[11], tf.x[12], tf.x[13], tf.x[14], tf.x[15]],
tf,
);
tf.x[10] = ret as usize; tf.x[10] = ret as usize;
} }
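The handler above fixes the RISC-V syscall ABI used throughout the kernel: number in a7 (x[17]), arguments in a0..a5 (x[10..16]), result returned in a0, sepc bumped past the ecall. A hypothetical user-side wrapper matching that ABI, written in the old asm! syntax this tree still uses (sketch only, not part of this commit):

    unsafe fn sys_call3(id: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
        let ret: usize;
        asm!("ecall"
            : "={x10}"(ret)                                            // a0: return value
            : "{x17}"(id), "{x10}"(arg0), "{x11}"(arg1), "{x12}"(arg2) // number and arguments
            : "memory"
            : "volatile");
        ret
    }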
@ -1,5 +1,5 @@
use core::fmt::{Write, Result, Arguments};
use super::sbi; use super::sbi;
use core::fmt::{Arguments, Result, Write};
struct SerialPort; struct SerialPort;
@ -1,14 +1,16 @@
use crate::consts::{KERNEL_OFFSET, MEMORY_END, MEMORY_OFFSET};
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use core::mem; use core::mem;
use riscv::{addr::*, register::sstatus};
use rcore_memory::PAGE_SIZE;
use log::*; use log::*;
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryAttr, MemorySet, Linear}; use rcore_memory::PAGE_SIZE;
use crate::consts::{MEMORY_OFFSET, MEMORY_END, KERNEL_OFFSET};
use riscv::register::satp; use riscv::register::satp;
use riscv::{addr::*, register::sstatus};
/// Initialize the memory management module /// Initialize the memory management module
pub fn init(dtb: usize) { pub fn init(dtb: usize) {
unsafe { sstatus::set_sum(); } // Allow user memory access unsafe {
sstatus::set_sum();
} // Allow user memory access
// initialize heap and Frame allocator // initialize heap and Frame allocator
init_frame_allocator(); init_frame_allocator();
init_heap(); init_heap();
@ -28,7 +30,10 @@ fn init_frame_allocator() {
use core::ops::Range; use core::ops::Range;
let mut ba = FRAME_ALLOCATOR.lock(); let mut ba = FRAME_ALLOCATOR.lock();
let range = to_range((end as usize) - KERNEL_OFFSET + MEMORY_OFFSET + PAGE_SIZE, MEMORY_END); let range = to_range(
(end as usize) - KERNEL_OFFSET + MEMORY_OFFSET + PAGE_SIZE,
MEMORY_END,
);
ba.insert(range); ba.insert(range);
info!("frame allocator: init end"); info!("frame allocator: init end");
@ -46,18 +51,70 @@ fn init_frame_allocator() {
fn remap_the_kernel(dtb: usize) { fn remap_the_kernel(dtb: usize) {
let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize); let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
let mut ms = MemorySet::new_bare(); let mut ms = MemorySet::new_bare();
ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(offset), "text"); ms.push(
ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(offset), "data"); stext as usize,
ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(offset), "rodata"); etext as usize,
ms.push(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), Linear::new(offset), "stack"); MemoryAttr::default().execute().readonly(),
ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(offset), "bss"); Linear::new(offset),
ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, MemoryAttr::default().readonly(), Linear::new(offset), "dts"); "text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"stack",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
ms.push(
dtb,
dtb + super::consts::MAX_DTB_SIZE,
MemoryAttr::default().readonly(),
Linear::new(offset),
"dts",
);
// map PLIC for HiFiveU // map PLIC for HiFiveU
let offset = -(KERNEL_OFFSET as isize); let offset = -(KERNEL_OFFSET as isize);
ms.push(KERNEL_OFFSET + 0x0C00_2000, KERNEL_OFFSET + 0x0C00_2000 + PAGE_SIZE, MemoryAttr::default(), Linear::new(offset), "plic0"); ms.push(
ms.push(KERNEL_OFFSET + 0x0C20_2000, KERNEL_OFFSET + 0x0C20_2000 + PAGE_SIZE, MemoryAttr::default(), Linear::new(offset), "plic1"); KERNEL_OFFSET + 0x0C00_2000,
unsafe { ms.activate(); } KERNEL_OFFSET + 0x0C00_2000 + PAGE_SIZE,
unsafe { SATP = ms.token(); } MemoryAttr::default(),
Linear::new(offset),
"plic0",
);
ms.push(
KERNEL_OFFSET + 0x0C20_2000,
KERNEL_OFFSET + 0x0C20_2000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic1",
);
unsafe {
ms.activate();
}
unsafe {
SATP = ms.token();
}
mem::forget(ms); mem::forget(ms);
info!("remap kernel end"); info!("remap kernel end");
} }
@ -77,7 +134,7 @@ pub unsafe fn clear_bss() {
// Symbols provided by linker script // Symbols provided by linker script
#[allow(dead_code)] #[allow(dead_code)]
extern { extern "C" {
fn stext(); fn stext();
fn etext(); fn etext();
fn sdata(); fn sdata();
@ -1,34 +1,38 @@
pub mod io; #[cfg(feature = "board_u540")]
pub mod interrupt; #[path = "board/u540/mod.rs"]
pub mod timer; mod board;
pub mod paging;
pub mod memory;
pub mod compiler_rt; pub mod compiler_rt;
pub mod consts; pub mod consts;
pub mod cpu; pub mod cpu;
pub mod syscall; pub mod interrupt;
pub mod io;
pub mod memory;
pub mod paging;
pub mod rand; pub mod rand;
#[cfg(feature = "board_u540")]
#[path = "board/u540/mod.rs"]
mod board;
mod sbi; mod sbi;
pub mod syscall;
pub mod timer;
use log::*; use log::*;
#[no_mangle] #[no_mangle]
pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! { pub extern "C" fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! {
// An initial recursive page table has been set by BBL (shared by all cores) // An initial recursive page table has been set by BBL (shared by all cores)
unsafe { cpu::set_cpu_id(hartid); } unsafe {
cpu::set_cpu_id(hartid);
}
if hartid != BOOT_HART_ID { if hartid != BOOT_HART_ID {
while unsafe { !cpu::has_started(hartid) } { } while unsafe { !cpu::has_started(hartid) } {}
println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb); println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb);
others_main(); others_main();
//other_main -> ! //other_main -> !
} }
unsafe { memory::clear_bss(); } unsafe {
memory::clear_bss();
}
println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb); println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb);
@ -40,10 +44,14 @@ pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! {
#[cfg(not(feature = "board_u540"))] #[cfg(not(feature = "board_u540"))]
crate::drivers::init(dtb); crate::drivers::init(dtb);
#[cfg(feature = "board_u540")] #[cfg(feature = "board_u540")]
unsafe { board::init_external_interrupt(); } unsafe {
board::init_external_interrupt();
}
crate::process::init(); crate::process::init();
unsafe { cpu::start_others(hart_mask); } unsafe {
cpu::start_others(hart_mask);
}
crate::kmain(); crate::kmain();
} }
@ -61,7 +69,8 @@ const BOOT_HART_ID: usize = 1;
/// Constant & Macro for `trap.asm` /// Constant & Macro for `trap.asm`
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
global_asm!(r" global_asm!(
r"
.equ XLENB, 4 .equ XLENB, 4
.equ XLENb, 32 .equ XLENb, 32
.macro LOAD a1, a2 .macro LOAD a1, a2
@ -70,9 +79,11 @@ global_asm!(r"
.macro STORE a1, a2 .macro STORE a1, a2
sw \a1, \a2*XLENB(sp) sw \a1, \a2*XLENB(sp)
.endm .endm
"); "
);
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
global_asm!(r" global_asm!(
r"
.equ XLENB, 8 .equ XLENB, 8
.equ XLENb, 64 .equ XLENb, 64
.macro LOAD a1, a2 .macro LOAD a1, a2
@ -81,8 +92,8 @@ global_asm!(r"
.macro STORE a1, a2 .macro STORE a1, a2
sd \a1, \a2*XLENB(sp) sd \a1, \a2*XLENB(sp)
.endm .endm
"); "
);
global_asm!(include_str!("boot/entry.asm")); global_asm!(include_str!("boot/entry.asm"));
global_asm!(include_str!("boot/trap.asm")); global_asm!(include_str!("boot/trap.asm"));
@ -1,17 +1,20 @@
use crate::consts::RECURSIVE_INDEX; use crate::consts::RECURSIVE_INDEX;
// Depends on kernel // Depends on kernel
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
use crate::memory::{active_table, alloc_frame, dealloc_frame}; use crate::memory::{active_table, alloc_frame, dealloc_frame};
use log::*;
use rcore_memory::paging::*;
use riscv::addr::*; use riscv::addr::*;
use riscv::asm::{sfence_vma, sfence_vma_all}; use riscv::asm::{sfence_vma, sfence_vma_all};
use riscv::paging::{Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable, PageTableType};
use riscv::paging::{FrameAllocator, FrameDeallocator}; use riscv::paging::{FrameAllocator, FrameDeallocator};
use riscv::paging::{
Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, PageTableType,
RecursivePageTable,
};
use riscv::register::satp; use riscv::register::satp;
use rcore_memory::paging::*;
use log::*;
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry); pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
@ -20,7 +23,6 @@ pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
pub struct PageEntry(&'static mut PageTableEntry, Page); pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable { impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging::Mapper::map_to, // use riscv::paging::Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags` // map the 4K `page` to the 4K `frame` with `flags`
@ -29,7 +31,10 @@ impl PageTable for ActivePageTable {
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv // map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second) // we may need frame allocator to alloc frame for new page table(first/second)
self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv).unwrap().flush(); self.0
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap()
.flush();
self.get_entry(addr).expect("fail to get entry") self.get_entry(addr).expect("fail to get entry")
} }
@ -56,29 +61,26 @@ impl PageTableExt for ActivePageTable {}
/// The virtual address of root page table /// The virtual address of root page table
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
const ROOT_PAGE_TABLE: *mut RvPageTable = const ROOT_PAGE_TABLE: *mut RvPageTable =
((RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", feature = "sv39"))] #[cfg(all(target_arch = "riscv64", feature = "sv39"))]
const ROOT_PAGE_TABLE: *mut RvPageTable = const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
((0xFFFF_0000_0000_0000) | | (0o777 << 12 << 9 << 9 << 9)
(0o777 << 12 << 9 << 9 << 9) | | (RECURSIVE_INDEX << 12 << 9 << 9)
(RECURSIVE_INDEX << 12 << 9 << 9) | | (RECURSIVE_INDEX << 12 << 9)
(RECURSIVE_INDEX << 12 << 9) | | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", not(feature = "sv39")))] #[cfg(all(target_arch = "riscv64", not(feature = "sv39")))]
const ROOT_PAGE_TABLE: *mut RvPageTable = const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
((0xFFFF_0000_0000_0000) | | (RECURSIVE_INDEX << 12 << 9 << 9 << 9)
(RECURSIVE_INDEX << 12 << 9 << 9 << 9) | | (RECURSIVE_INDEX << 12 << 9 << 9)
(RECURSIVE_INDEX << 12 << 9 << 9) | | (RECURSIVE_INDEX << 12 << 9)
(RECURSIVE_INDEX << 12 << 9) | | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
impl ActivePageTable { impl ActivePageTable {
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self { pub unsafe fn new() -> Self {
ActivePageTable( ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(), RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::uninitialized() ::core::mem::uninitialized(),
) )
} }
#[cfg(target_arch = "riscv64")] #[cfg(target_arch = "riscv64")]
@ -89,7 +91,7 @@ impl ActivePageTable {
let type_ = PageTableType::Sv48; let type_ = PageTableType::Sv48;
ActivePageTable( ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(), RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(),
::core::mem::uninitialized() ::core::mem::uninitialized(),
) )
} }
} }
@ -97,38 +99,78 @@ impl ActivePageTable {
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs /// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry { impl Entry for PageEntry {
fn update(&mut self) { fn update(&mut self) {
unsafe { sfence_vma(0, self.1.start_address().as_usize()); } unsafe {
} sfence_vma(0, self.1.start_address().as_usize());
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) } }
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) } }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) } fn accessed(&self) -> bool {
fn present(&self) -> bool { self.0.flags().contains(EF::VALID | EF::READABLE) } self.0.flags().contains(EF::ACCESSED)
fn clear_accessed(&mut self) { self.0.flags_mut().remove(EF::ACCESSED); } }
fn clear_dirty(&mut self) { self.0.flags_mut().remove(EF::DIRTY); } fn dirty(&self) -> bool {
fn set_writable(&mut self, value: bool) { self.0.flags_mut().set(EF::WRITABLE, value); } self.0.flags().contains(EF::DIRTY)
fn set_present(&mut self, value: bool) { self.0.flags_mut().set(EF::VALID | EF::READABLE, value); } }
fn target(&self) -> usize { self.0.addr().as_usize() } fn writable(&self) -> bool {
self.0.flags().contains(EF::WRITABLE)
}
fn present(&self) -> bool {
self.0.flags().contains(EF::VALID | EF::READABLE)
}
fn clear_accessed(&mut self) {
self.0.flags_mut().remove(EF::ACCESSED);
}
fn clear_dirty(&mut self) {
self.0.flags_mut().remove(EF::DIRTY);
}
fn set_writable(&mut self, value: bool) {
self.0.flags_mut().set(EF::WRITABLE, value);
}
fn set_present(&mut self, value: bool) {
self.0.flags_mut().set(EF::VALID | EF::READABLE, value);
}
fn target(&self) -> usize {
self.0.addr().as_usize()
}
fn set_target(&mut self, target: usize) { fn set_target(&mut self, target: usize) {
let flags = self.0.flags(); let flags = self.0.flags();
let frame = Frame::of_addr(PhysAddr::new(target)); let frame = Frame::of_addr(PhysAddr::new(target));
self.0.set(frame, flags); self.0.set(frame, flags);
} }
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED1) } fn writable_shared(&self) -> bool {
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED2) } self.0.flags().contains(EF::RESERVED1)
}
fn readonly_shared(&self) -> bool {
self.0.flags().contains(EF::RESERVED2)
}
fn set_shared(&mut self, writable: bool) { fn set_shared(&mut self, writable: bool) {
let flags = self.0.flags_mut(); let flags = self.0.flags_mut();
flags.set(EF::RESERVED1, writable); flags.set(EF::RESERVED1, writable);
flags.set(EF::RESERVED2, !writable); flags.set(EF::RESERVED2, !writable);
} }
fn clear_shared(&mut self) { self.0.flags_mut().remove(EF::RESERVED1 | EF::RESERVED2); } fn clear_shared(&mut self) {
fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) } self.0.flags_mut().remove(EF::RESERVED1 | EF::RESERVED2);
fn set_swapped(&mut self, value: bool) { self.0.flags_mut().set(EF::RESERVED1, value); } }
fn user(&self) -> bool { self.0.flags().contains(EF::USER) } fn swapped(&self) -> bool {
fn set_user(&mut self, value: bool) { self.0.flags_mut().set(EF::USER, value); } self.0.flags().contains(EF::RESERVED1)
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) } }
fn set_execute(&mut self, value: bool) { self.0.flags_mut().set(EF::EXECUTABLE, value); } fn set_swapped(&mut self, value: bool) {
fn mmio(&self) -> u8 { 0 } self.0.flags_mut().set(EF::RESERVED1, value);
fn set_mmio(&mut self, _value: u8) { } }
fn user(&self) -> bool {
self.0.flags().contains(EF::USER)
}
fn set_user(&mut self, value: bool) {
self.0.flags_mut().set(EF::USER, value);
}
fn execute(&self) -> bool {
self.0.flags().contains(EF::EXECUTABLE)
}
fn set_execute(&mut self, value: bool) {
self.0.flags_mut().set(EF::EXECUTABLE, value);
}
fn mmio(&self) -> u8 {
0
}
fn set_mmio(&mut self, _value: u8) {}
} }
#[derive(Debug)] #[derive(Debug)]
@ -152,7 +194,7 @@ impl InactivePageTable for InactivePageTable0 {
#[cfg(target_arch = "riscv32")] #[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) { fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE }; let table = unsafe { &mut *ROOT_PAGE_TABLE };
extern { extern "C" {
fn start(); fn start();
fn end(); fn end();
} }
@ -208,7 +250,9 @@ impl InactivePageTable for InactivePageTable0 {
} }
fn flush_tlb() { fn flush_tlb() {
unsafe { sfence_vma_all(); } unsafe {
sfence_vma_all();
}
} }
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T { fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
@ -218,14 +262,18 @@ impl InactivePageTable for InactivePageTable0 {
// overwrite recursive mapping // overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID); root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
unsafe { sfence_vma_all(); } unsafe {
sfence_vma_all();
}
// execute f in the new context // execute f in the new context
let ret = f(active_table); let ret = f(active_table);
// restore recursive mapping to original p2 table // restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup; root_table[RECURSIVE_INDEX] = backup;
unsafe { sfence_vma_all(); } unsafe {
sfence_vma_all();
}
ret ret
}) })

@ -28,7 +28,12 @@ pub fn shutdown() -> ! {
pub fn set_timer(stime_value: u64) { pub fn set_timer(stime_value: u64) {
#[cfg(target_pointer_width = "32")] #[cfg(target_pointer_width = "32")]
sbi_call(SBI_SET_TIMER, stime_value as usize, (stime_value >> 32) as usize, 0); sbi_call(
SBI_SET_TIMER,
stime_value as usize,
(stime_value >> 32) as usize,
0,
);
#[cfg(target_pointer_width = "64")] #[cfg(target_pointer_width = "64")]
sbi_call(SBI_SET_TIMER, stime_value as usize, 0, 0); sbi_call(SBI_SET_TIMER, stime_value as usize, 0, 0);
} }
@ -50,7 +55,12 @@ pub fn remote_sfence_vma(hart_mask: usize, _start: usize, _size: usize) {
} }
pub fn remote_sfence_vma_asid(hart_mask: usize, _start: usize, _size: usize, _asid: usize) { pub fn remote_sfence_vma_asid(hart_mask: usize, _start: usize, _size: usize, _asid: usize) {
sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, &hart_mask as *const _ as usize, 0, 0); sbi_call(
SBI_REMOTE_SFENCE_VMA_ASID,
&hart_mask as *const _ as usize,
0,
0,
);
} }
const SBI_SET_TIMER: usize = 0; const SBI_SET_TIMER: usize = 0;
@ -1,6 +1,6 @@
use riscv::register::*;
use super::sbi; use super::sbi;
use log::*; use log::*;
use riscv::register::*;
#[cfg(target_pointer_width = "64")] #[cfg(target_pointer_width = "64")]
pub fn get_cycle() -> u64 { pub fn get_cycle() -> u64 {
@ -27,7 +27,9 @@ pub fn read_epoch() -> u64 {
/// Enable timer interrupt /// Enable timer interrupt
pub fn init() { pub fn init() {
// Enable supervisor timer interrupt // Enable supervisor timer interrupt
unsafe { sie::set_stimer(); } unsafe {
sie::set_stimer();
}
set_next(); set_next();
info!("timer: init end"); info!("timer: init end");
} }
@ -14,7 +14,10 @@ pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
} }
pub fn id() -> usize { pub fn id() -> usize {
CpuId::new().get_feature_info().unwrap().initial_local_apic_id() as usize CpuId::new()
.get_feature_info()
.unwrap()
.initial_local_apic_id() as usize
} }
pub fn send_ipi(cpu_id: usize) { pub fn send_ipi(cpu_id: usize) {
@ -31,7 +34,7 @@ pub fn init() {
unsafe { unsafe {
asm!("mov %cr4, $0" : "=r" (value)); asm!("mov %cr4, $0" : "=r" (value));
// OSFXSR | OSXMMEXCPT // OSFXSR | OSXMMEXCPT
value |= 1 << 9 | 1 << 10 ; value |= 1 << 9 | 1 << 10;
asm!("mov $0, %cr4" :: "r" (value) : "memory"); asm!("mov $0, %cr4" :: "r" (value) : "memory");
Cr0::update(|cr0| { Cr0::update(|cr0| {
cr0.remove(Cr0Flags::EMULATE_COPROCESSOR); cr0.remove(Cr0Flags::EMULATE_COPROCESSOR);
@ -16,10 +16,26 @@ pub struct IDE {
impl IDE { impl IDE {
pub fn new(num: u8) -> Self { pub fn new(num: u8) -> Self {
let ide = match num { let ide = match num {
0 => IDE { num: 0, base: 0x1f0, ctrl: 0x3f4 }, 0 => IDE {
1 => IDE { num: 1, base: 0x1f0, ctrl: 0x3f4 }, num: 0,
2 => IDE { num: 2, base: 0x170, ctrl: 0x374 }, base: 0x1f0,
3 => IDE { num: 3, base: 0x170, ctrl: 0x374 }, ctrl: 0x3f4,
},
1 => IDE {
num: 1,
base: 0x1f0,
ctrl: 0x3f4,
},
2 => IDE {
num: 2,
base: 0x170,
ctrl: 0x374,
},
3 => IDE {
num: 3,
base: 0x170,
ctrl: 0x374,
},
_ => panic!("ide number should be 0,1,2,3"), _ => panic!("ide number should be 0,1,2,3"),
}; };
ide.init(); ide.init();
@ -103,14 +119,17 @@ impl IDE {
port::outb(self.base + ISA_SECTOR, (sector & 0xFF) as u8); port::outb(self.base + ISA_SECTOR, (sector & 0xFF) as u8);
port::outb(self.base + ISA_CYL_LO, ((sector >> 8) & 0xFF) as u8); port::outb(self.base + ISA_CYL_LO, ((sector >> 8) & 0xFF) as u8);
port::outb(self.base + ISA_CYL_HI, ((sector >> 16) & 0xFF) as u8); port::outb(self.base + ISA_CYL_HI, ((sector >> 16) & 0xFF) as u8);
port::outb(self.base + ISA_SDH, 0xE0 | ((self.num & 1) << 4) | (((sector >> 24) & 0xF) as u8)); port::outb(
self.base + ISA_SDH,
0xE0 | ((self.num & 1) << 4) | (((sector >> 24) & 0xF) as u8),
);
} }
} }
} }
const SECTOR_SIZE: usize = 128; const SECTOR_SIZE: usize = 128;
const MAX_DMA_SECTORS: usize = 0x1F_F000 / SECTOR_SIZE; // Limited by sector count (and PRDT entries) const MAX_DMA_SECTORS: usize = 0x1F_F000 / SECTOR_SIZE; // Limited by sector count (and PRDT entries)
// 512 PRDT entries, assume maximum fragmentation = 512 * 4K max = 2^21 = 2MB per transfer // 512 PRDT entries, assume maximum fragmentation = 512 * 4K max = 2^21 = 2MB per transfer
const ISA_DATA: u16 = 0x00; const ISA_DATA: u16 = 0x00;
const ISA_ERROR: u16 = 0x01; const ISA_ERROR: u16 = 0x01;
@ -1,7 +1,7 @@
use lazy_static::lazy_static;
use pc_keyboard::{layouts, DecodedKey, HandleControl, Keyboard, ScancodeSet1};
use spin::Mutex; use spin::Mutex;
use x86_64::instructions::port::Port; use x86_64::instructions::port::Port;
use pc_keyboard::{Keyboard, ScancodeSet1, DecodedKey, layouts, HandleControl};
use lazy_static::lazy_static;
pub fn init() { pub fn init() {
use crate::arch::interrupt::consts; use crate::arch::interrupt::consts;
@ -13,8 +13,9 @@ pub fn init() {
/// Should be called on every interrupt /// Should be called on every interrupt
pub fn receive() -> Option<DecodedKey> { pub fn receive() -> Option<DecodedKey> {
lazy_static! { lazy_static! {
static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> = static ref KEYBOARD: Mutex<Keyboard<layouts::Us104Key, ScancodeSet1>> = Mutex::new(
Mutex::new(Keyboard::new(layouts::Us104Key, ScancodeSet1, HandleControl::Ignore)); Keyboard::new(layouts::Us104Key, ScancodeSet1, HandleControl::Ignore)
);
} }
let mut keyboard = KEYBOARD.lock(); let mut keyboard = KEYBOARD.lock();
@ -1,12 +1,12 @@
use once::*; use once::*;
pub mod vga; pub mod ide;
pub mod serial;
pub mod pic;
pub mod keyboard; pub mod keyboard;
pub mod pic;
pub mod pit; pub mod pit;
pub mod ide;
pub mod rtc_cmos; pub mod rtc_cmos;
pub mod serial;
pub mod vga;
pub fn init() { pub fn init() {
assert_has_not_been_called!(); assert_has_not_been_called!();
@ -1,9 +1,9 @@
// Copy from Redox // Copy from Redox
use x86_64::instructions::port::Port;
use spin::Mutex;
use once::*;
use log::*; use log::*;
use once::*;
use spin::Mutex;
use x86_64::instructions::port::Port;
static MASTER: Mutex<Pic> = Mutex::new(Pic::new(0x20)); static MASTER: Mutex<Pic> = Mutex::new(Pic::new(0x20));
static SLAVE: Mutex<Pic> = Mutex::new(Pic::new(0xA0)); static SLAVE: Mutex<Pic> = Mutex::new(Pic::new(0xA0));
@ -53,7 +53,7 @@ pub unsafe fn init() {
pub fn enable_irq(irq: u8) { pub fn enable_irq(irq: u8) {
match irq { match irq {
_ if irq < 8 => MASTER.lock().mask_set(irq), _ if irq < 8 => MASTER.lock().mask_set(irq),
_ if irq < 16 => SLAVE.lock().mask_set(irq-8), _ if irq < 16 => SLAVE.lock().mask_set(irq - 8),
_ => panic!("irq not in 0..16"), _ => panic!("irq not in 0..16"),
} }
} }
@ -80,7 +80,9 @@ impl Pic {
} }
fn ack(&mut self) { fn ack(&mut self) {
unsafe { self.cmd.write(0x20); } unsafe {
self.cmd.write(0x20);
}
} }
fn mask_set(&mut self, irq: u8) { fn mask_set(&mut self, irq: u8) {
@ -1,6 +1,6 @@
use x86_64::instructions::port::Port;
use log::*; use log::*;
use once::*; use once::*;
use x86_64::instructions::port::Port;
pub fn init() { pub fn init() {
assert_has_not_been_called!("pit::init must be called only once"); assert_has_not_been_called!("pit::init must be called only once");
@ -39,7 +39,7 @@ impl Pit {
} }
} }
const TIMER_FREQ : u32 = 1193182; const TIMER_FREQ: u32 = 1193182;
const TIMER_SEL0 : u8 = 0x00; // select counter 0 const TIMER_SEL0: u8 = 0x00; // select counter 0
const TIMER_RATEGEN : u8 = 0x04; // mode 2, rate generator const TIMER_RATEGEN: u8 = 0x04; // mode 2, rate generator
const TIMER_16BIT : u8 = 0x30; // r/w counter 16 bits, LSB first const TIMER_16BIT: u8 = 0x30; // r/w counter 16 bits, LSB first
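For context, these three constants are normally combined into a single mode/command byte when programming channel 0 as a periodic rate generator. A hedged sketch, assuming an `outb(port, value)` helper like the `port::outb` used in the IDE driver above (illustration only, not code from this commit):

    fn pit_start(hz: u32) {
        let divisor = (TIMER_FREQ / hz) as u16;               // e.g. 1193182 / 100 ≈ 11932
        outb(0x43, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); // command port: ch0, mode 2, lo/hi byte
        outb(0x40, (divisor & 0xFF) as u8);                   // channel-0 data port: low byte
        outb(0x40, (divisor >> 8) as u8);                     // then high byte
    }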
@ -61,7 +61,8 @@ pub fn read_epoch() -> u64 {
month = month - 2; month = month - 2;
} }
let result = ((((year / 4 - year / 100 + year / 400 + 367 * month / 12 + day) + year * 365 let result = ((((year / 4 - year / 100 + year / 400 + 367 * month / 12 + day)
+ year * 365
- 719499) - 719499)
* 24 * 24
+ hour) + hour)
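The hunk above is cut off mid-expression; the computation follows the classic mktime() formula, where January and February are counted as months 11 and 12 of the previous year so the leap day falls at the end. A hedged reconstruction as a self-contained function (assumption: the elided tail multiplies through minutes and seconds in the usual way):

    fn secs_since_epoch(mut year: u64, mut month: u64, day: u64,
                        hour: u64, min: u64, sec: u64) -> u64 {
        if month <= 2 {
            month += 10;  // Jan -> 11, Feb -> 12 of the previous March-based year
            year -= 1;
        } else {
            month -= 2;
        }
        let days = year / 4 - year / 100 + year / 400 // leap-year corrections
            + 367 * month / 12 + day                  // days contributed by whole months
            + year * 365
            - 719_499;                                // shift so 1970-01-01 is day 0
        ((days * 24 + hour) * 60 + min) * 60 + sec
    }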
@ -51,10 +51,14 @@ pub struct ScreenChar {
} }
impl ScreenChar { impl ScreenChar {
pub fn new(ascii_char: u8, foreground_color: ConsoleColor, background_color: ConsoleColor) -> Self { pub fn new(
ascii_char: u8,
foreground_color: ConsoleColor,
background_color: ConsoleColor,
) -> Self {
ScreenChar { ScreenChar {
ascii_char, ascii_char,
color_code: ColorCode::new(foreground_color, background_color) color_code: ColorCode::new(foreground_color, background_color),
} }
} }
} }
@ -69,7 +73,7 @@ pub struct VgaBuffer {
impl VgaBuffer { impl VgaBuffer {
pub fn clear(&mut self) { pub fn clear(&mut self) {
let blank = ScreenChar::new(b' ', ConsoleColor::White, ConsoleColor::Black); let blank = ScreenChar::new(b' ', ConsoleColor::White, ConsoleColor::Black);
for row in 0 .. BUFFER_HEIGHT { for row in 0..BUFFER_HEIGHT {
for col in 0..BUFFER_WIDTH { for col in 0..BUFFER_WIDTH {
self.chars[row][col].write(blank); self.chars[row][col].write(blank);
} }
@ -135,7 +139,8 @@ impl BaseConsole for VgaWriter {
pos.row.bound(self.get_height()); pos.row.bound(self.get_height());
pos.col.bound(self.get_width()); pos.col.bound(self.get_width());
self.pos = pos; self.pos = pos;
self.buffer.set_cursor_at(pos.row.0 as usize, pos.col.0 as usize); self.buffer
.set_cursor_at(pos.row.0 as usize, pos.col.0 as usize);
Ok(()) Ok(())
} }
@ -180,7 +185,8 @@ impl AsciiConsole for VgaWriter {
ascii_char: ch, ascii_char: ch,
color_code: self.color_code, color_code: self.color_code,
}; };
self.buffer.write(pos.row.0 as usize, pos.col.0 as usize, screen_char); self.buffer
.write(pos.row.0 as usize, pos.col.0 as usize, screen_char);
Ok(()) Ok(())
} }
@ -221,8 +227,7 @@ impl AsciiConsole for VgaWriter {
0x1b => Some(SpecialChar::Escape), 0x1b => Some(SpecialChar::Escape),
0x7f => Some(SpecialChar::Delete), 0x7f => Some(SpecialChar::Delete),
0x08 => Some(SpecialChar::Backspace), 0x08 => Some(SpecialChar::Backspace),
_ if !(ch.is_ascii_graphic() || ch == b' ') _ if !(ch.is_ascii_graphic() || ch == b' ') => Some(SpecialChar::Delete), // ignore non-graphic ascii
=> Some(SpecialChar::Delete), // ignore non-graphic ascii
_ => None, _ => None,
}, },
_ => None, _ => None,
@ -246,7 +251,6 @@ impl VgaWriter {
impl fmt::Write for VgaWriter { impl fmt::Write for VgaWriter {
fn write_str(&mut self, s: &str) -> fmt::Result { fn write_str(&mut self, s: &str) -> fmt::Result {
self.write_string(s.as_bytes()) self.write_string(s.as_bytes()).map_err(|_| fmt::Error)
.map_err(|_| fmt::Error)
} }
} }
@ -1,9 +1,9 @@
use alloc::boxed::Box; use alloc::boxed::Box;
use x86_64::{PrivilegeLevel, VirtAddr}; use x86_64::registers::model_specific::Msr;
use x86_64::structures::gdt::*; use x86_64::structures::gdt::*;
use x86_64::structures::tss::TaskStateSegment; use x86_64::structures::tss::TaskStateSegment;
use x86_64::registers::model_specific::Msr; use x86_64::{PrivilegeLevel, VirtAddr};
use crate::consts::MAX_CPU_NUM; use crate::consts::MAX_CPU_NUM;
@ -17,8 +17,7 @@ pub fn init() {
static mut CPUS: [Option<Cpu>; MAX_CPU_NUM] = [ static mut CPUS: [Option<Cpu>; MAX_CPU_NUM] = [
// TODO: More elegant ? // TODO: More elegant ?
None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None,
]; ];
pub struct Cpu { pub struct Cpu {
@ -41,7 +40,7 @@ impl Cpu {
} }
unsafe fn init(&'static mut self) { unsafe fn init(&'static mut self) {
use x86_64::instructions::segmentation::{set_cs, load_fs}; use x86_64::instructions::segmentation::{load_fs, set_cs};
use x86_64::instructions::tables::load_tss; use x86_64::instructions::tables::load_tss;
// Set the stack when DoubleFault occurs // Set the stack when DoubleFault occurs
@ -83,7 +82,7 @@ const KCODE: Descriptor = Descriptor::UserSegment(0x0020980000000000); // EXECU
const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE
const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT
const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
// Copied from xv6 // Copied from xv6
const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT
const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
@ -1,5 +1,5 @@
use x86_64::structures::idt::*;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use x86_64::structures::idt::*;
pub fn init() { pub fn init() {
IDT.load(); IDT.load();
@ -37,9 +37,9 @@ lazy_static! {
}; };
} }
extern { extern "C" {
    /// Interrupt vector table /// Interrupt vector table
    /// Symbols are defined in [trap.asm](boot/trap.asm) /// Symbols are defined in [trap.asm](boot/trap.asm)
//noinspection RsStaticConstNaming //noinspection RsStaticConstNaming
static __vectors: [extern fn(); 256]; static __vectors: [extern "C" fn(); 256];
} }
@ -1,9 +1,8 @@
/// `syscall` instruction
use x86_64::registers::model_specific::*;
use core::mem::transmute;
use super::super::gdt; use super::super::gdt;
use super::TrapFrame; use super::TrapFrame;
use core::mem::transmute;
/// `syscall` instruction
use x86_64::registers::model_specific::*;
pub fn init() { pub fn init() {
unsafe { unsafe {
@ -26,7 +25,7 @@ pub fn init() {
} }
} }
extern { extern "C" {
fn syscall_entry(); fn syscall_entry();
} }
@ -66,17 +66,21 @@
use super::consts::*; use super::consts::*;
use super::TrapFrame; use super::TrapFrame;
use log::*;
use bitflags::*;
use crate::drivers::DRIVERS; use crate::drivers::DRIVERS;
use bitflags::*;
use log::*;
global_asm!(include_str!("trap.asm")); global_asm!(include_str!("trap.asm"));
global_asm!(include_str!("vector.asm")); global_asm!(include_str!("vector.asm"));
#[allow(non_upper_case_globals)] #[allow(non_upper_case_globals)]
#[no_mangle] #[no_mangle]
pub extern fn rust_trap(tf: &mut TrapFrame) { pub extern "C" fn rust_trap(tf: &mut TrapFrame) {
trace!("Interrupt: {:#x} @ CPU{}", tf.trap_num, super::super::cpu::id()); trace!(
"Interrupt: {:#x} @ CPU{}",
tf.trap_num,
super::super::cpu::id()
);
// Dispatch // Dispatch
match tf.trap_num as u8 { match tf.trap_num as u8 {
Breakpoint => breakpoint(), Breakpoint => breakpoint(),
@ -99,7 +103,7 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
} }
} }
warn!("unhandled external IRQ number: {}", irq); warn!("unhandled external IRQ number: {}", irq);
}, }
} }
} }
Syscall32 => syscall32(tf), Syscall32 => syscall32(tf),
@ -120,7 +124,9 @@ fn double_fault(tf: &TrapFrame) {
fn page_fault(tf: &mut TrapFrame) { fn page_fault(tf: &mut TrapFrame) {
let addr: usize; let addr: usize;
unsafe { asm!("mov %cr2, $0" : "=r" (addr)); } unsafe {
asm!("mov %cr2, $0" : "=r" (addr));
}
bitflags! { bitflags! {
struct PageError: u8 { struct PageError: u8 {
@ -209,7 +215,7 @@ fn error(tf: &TrapFrame) {
} }
#[no_mangle] #[no_mangle]
pub unsafe extern fn set_return_rsp(tf: *const TrapFrame) { pub unsafe extern "C" fn set_return_rsp(tf: *const TrapFrame) {
use crate::arch::gdt::Cpu; use crate::arch::gdt::Cpu;
Cpu::current().set_ring0_rsp(tf.add(1) as usize); Cpu::current().set_ring0_rsp(tf.add(1) as usize);
} }
@ -1,12 +1,12 @@
pub mod consts; pub mod consts;
pub mod fast_syscall;
mod handler; mod handler;
mod trapframe; mod trapframe;
pub mod fast_syscall;
pub use self::trapframe::*;
pub use self::handler::*; pub use self::handler::*;
use apic::*; pub use self::trapframe::*;
use crate::consts::KERNEL_OFFSET; use crate::consts::KERNEL_OFFSET;
use apic::*;
#[inline(always)] #[inline(always)]
pub unsafe fn enable() { pub unsafe fn enable() {
@ -1,9 +1,9 @@
use core::fmt;
use core::default::Default; use core::default::Default;
use core::fmt;
#[derive(Clone)] #[derive(Clone)]
#[repr(C)] #[repr(C)]
pub struct FpState([u8; 16+512]); pub struct FpState([u8; 16 + 512]);
impl fmt::Debug for FpState { impl fmt::Debug for FpState {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
@ -13,11 +13,10 @@ impl fmt::Debug for FpState {
impl Default for FpState { impl Default for FpState {
fn default() -> Self { fn default() -> Self {
FpState([0u8; 16+512]) FpState([0u8; 16 + 512])
} }
} }
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]
#[repr(C)] #[repr(C)]
pub struct TrapFrame { pub struct TrapFrame {
@ -62,7 +61,7 @@ pub struct TrapFrame {
/// Used to construct a new thread's trap frame on the kernel stack /// Used to construct a new thread's trap frame on the kernel stack
impl TrapFrame { impl TrapFrame {
fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, rsp: usize) -> Self { fn new_kernel_thread(entry: extern "C" fn(usize) -> !, arg: usize, rsp: usize) -> Self {
use crate::arch::gdt; use crate::arch::gdt;
let mut tf = TrapFrame::default(); let mut tf = TrapFrame::default();
tf.rdi = arg; tf.rdi = arg;
@ -77,7 +76,11 @@ impl TrapFrame {
fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool) -> Self { fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool) -> Self {
use crate::arch::gdt; use crate::arch::gdt;
let mut tf = TrapFrame::default(); let mut tf = TrapFrame::default();
tf.cs = if is32 { gdt::UCODE32_SELECTOR.0 } else { gdt::UCODE_SELECTOR.0 } as usize; tf.cs = if is32 {
gdt::UCODE32_SELECTOR.0
} else {
gdt::UCODE_SELECTOR.0
} as usize;
tf.rip = entry_addr; tf.rip = entry_addr;
tf.ss = gdt::UDATA32_SELECTOR.0 as usize; tf.ss = gdt::UDATA32_SELECTOR.0 as usize;
tf.rsp = rsp; tf.rsp = rsp;
@ -105,7 +108,11 @@ struct ContextData {
impl ContextData { impl ContextData {
fn new(cr3: usize) -> Self { fn new(cr3: usize) -> Self {
ContextData { rip: trap_ret as usize, cr3, ..ContextData::default() } ContextData {
rip: trap_ret as usize,
cr3,
..ContextData::default()
}
} }
} }
@ -125,7 +132,7 @@ impl InitStack {
} }
} }
extern { extern "C" {
fn trap_ret(); fn trap_ret();
} }
@ -142,7 +149,7 @@ impl Context {
/// Pop all callee-saved registers, then return to the target. /// Pop all callee-saved registers, then return to the target.
#[naked] #[naked]
#[inline(never)] #[inline(never)]
pub unsafe extern fn switch(&mut self, _target: &mut Self) { pub unsafe extern "C" fn switch(&mut self, _target: &mut Self) {
asm!( asm!(
" "
// push rip (by caller) // push rip (by caller)
@ -180,17 +187,30 @@ impl Context {
Context(0) Context(0)
} }
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, cr3: usize) -> Self { pub unsafe fn new_kernel_thread(
entry: extern "C" fn(usize) -> !,
arg: usize,
kstack_top: usize,
cr3: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(cr3), context: ContextData::new(cr3),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top), tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top)
} }
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize) -> Self { .push_at(kstack_top)
}
pub unsafe fn new_user_thread(
entry_addr: usize,
ustack_top: usize,
kstack_top: usize,
is32: bool,
cr3: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(cr3), context: ContextData::new(cr3),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32), tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32),
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, cr3: usize) -> Self { pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, cr3: usize) -> Self {
InitStack { InitStack {
@ -200,9 +220,16 @@ impl Context {
tf.rax = 0; tf.rax = 0;
tf tf
}, },
}.push_at(kstack_top)
} }
pub unsafe fn new_clone(tf: &TrapFrame, ustack_top: usize, kstack_top: usize, cr3: usize, tls: usize) -> Self { .push_at(kstack_top)
}
pub unsafe fn new_clone(
tf: &TrapFrame,
ustack_top: usize,
kstack_top: usize,
cr3: usize,
tls: usize,
) -> Self {
InitStack { InitStack {
context: ContextData::new(cr3), context: ContextData::new(cr3),
tf: { tf: {
@ -212,7 +239,8 @@ impl Context {
tf.rax = 0; tf.rax = 0;
tf tf
}, },
}.push_at(kstack_top) }
.push_at(kstack_top)
} }
/// Called at a new user context /// Called at a new user context
/// To get the init TrapFrame in sys_exec /// To get the init TrapFrame in sys_exec
@ -3,19 +3,25 @@ use super::driver::vga::VGA_WRITER;
use core::fmt::{Arguments, Write}; use core::fmt::{Arguments, Write};
pub fn getchar() -> char { pub fn getchar() -> char {
unsafe { COM1.force_unlock(); } unsafe {
COM1.force_unlock();
}
COM1.lock().receive() as char COM1.lock().receive() as char
} }
pub fn putfmt(fmt: Arguments) { pub fn putfmt(fmt: Arguments) {
#[cfg(feature = "nographic")] #[cfg(feature = "nographic")]
{ {
unsafe { COM1.force_unlock(); } unsafe {
COM1.force_unlock();
}
COM1.lock().write_fmt(fmt).unwrap(); COM1.lock().write_fmt(fmt).unwrap();
} }
#[cfg(not(feature = "nographic"))] #[cfg(not(feature = "nographic"))]
{ {
unsafe { VGA_WRITER.force_unlock(); } unsafe {
VGA_WRITER.force_unlock();
}
VGA_WRITER.lock().write_fmt(fmt).unwrap(); VGA_WRITER.lock().write_fmt(fmt).unwrap();
} }
} }
@ -1,11 +1,11 @@
use bit_allocator::BitAlloc;
use crate::consts::KERNEL_OFFSET; use crate::consts::KERNEL_OFFSET;
use bit_allocator::BitAlloc;
// Depends on kernel // Depends on kernel
use crate::memory::{FRAME_ALLOCATOR, init_heap, active_table};
use super::{BootInfo, MemoryRegionType}; use super::{BootInfo, MemoryRegionType};
use rcore_memory::paging::*; use crate::memory::{active_table, init_heap, FRAME_ALLOCATOR};
use once::*;
use log::*; use log::*;
use once::*;
use rcore_memory::paging::*;
pub fn init(boot_info: &BootInfo) { pub fn init(boot_info: &BootInfo) {
assert_has_not_been_called!("memory::init must be called only once"); assert_has_not_been_called!("memory::init must be called only once");
@ -20,7 +20,9 @@ fn init_frame_allocator(boot_info: &BootInfo) {
let mut ba = FRAME_ALLOCATOR.lock(); let mut ba = FRAME_ALLOCATOR.lock();
for region in boot_info.memory_map.iter() { for region in boot_info.memory_map.iter() {
if region.region_type == MemoryRegionType::Usable { if region.region_type == MemoryRegionType::Usable {
ba.insert(region.range.start_frame_number as usize..region.range.end_frame_number as usize); ba.insert(
region.range.start_frame_number as usize..region.range.end_frame_number as usize,
);
} }
} }
} }
@ -28,7 +30,11 @@ fn init_frame_allocator(boot_info: &BootInfo) {
fn init_device_vm_map() { fn init_device_vm_map() {
let mut page_table = active_table(); let mut page_table = active_table();
// IOAPIC // IOAPIC
page_table.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000).update(); page_table
.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000)
.update();
// LocalAPIC // LocalAPIC
page_table.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000).update(); page_table
.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000)
.update();
} }
@ -2,18 +2,18 @@ use bootloader::bootinfo::{BootInfo, MemoryRegionType};
use core::sync::atomic::*; use core::sync::atomic::*;
use log::*; use log::*;
pub mod driver; pub mod consts;
pub mod cpu; pub mod cpu;
pub mod interrupt; pub mod driver;
pub mod paging;
pub mod gdt; pub mod gdt;
pub mod idt; pub mod idt;
pub mod memory; pub mod interrupt;
pub mod io; pub mod io;
pub mod consts; pub mod memory;
pub mod timer; pub mod paging;
pub mod syscall;
pub mod rand; pub mod rand;
pub mod syscall;
pub mod timer;
static AP_CAN_INIT: AtomicBool = ATOMIC_BOOL_INIT; static AP_CAN_INIT: AtomicBool = ATOMIC_BOOL_INIT;
@ -1,18 +1,18 @@
// Depends on kernel // Depends on kernel
use crate::consts::KERNEL_OFFSET;
use crate::memory::{active_table, alloc_frame, dealloc_frame}; use crate::memory::{active_table, alloc_frame, dealloc_frame};
use log::*;
use rcore_memory::paging::*; use rcore_memory::paging::*;
use x86_64::instructions::tlb; use x86_64::instructions::tlb;
use x86_64::PhysAddr;
use x86_64::registers::control::{Cr3, Cr3Flags}; use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{ use x86_64::structures::paging::{
page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF}, frame::PhysFrame as Frame,
mapper::{Mapper, RecursivePageTable}, mapper::{Mapper, RecursivePageTable},
page::{Page, PageRange, Size4KiB}, page::{Page, PageRange, Size4KiB},
frame::PhysFrame as Frame, page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator FrameAllocator, FrameDeallocator,
}; };
use crate::consts::KERNEL_OFFSET; use x86_64::PhysAddr;
use log::*;
pub trait PageExt { pub trait PageExt {
fn of_addr(address: usize) -> Self; fn of_addr(address: usize) -> Self;
@ -47,7 +47,12 @@ impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry { fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE; let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
unsafe { unsafe {
if let Ok(flush) = self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86) { if let Ok(flush) = self.0.map_to(
Page::of_addr(addr),
Frame::of_addr(target),
flags,
&mut FrameAllocatorForX86,
) {
flush.flush(); flush.flush();
} }
} }
@ -64,7 +69,9 @@ impl PageTable for ActivePageTable {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> { fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 { for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level); let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } { return None; } if unsafe { !(*entry).present() } {
return None;
}
} }
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) } unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
} }
@ -82,34 +89,64 @@ impl ActivePageTable {
impl Entry for PageEntry { impl Entry for PageEntry {
fn update(&mut self) { fn update(&mut self) {
use x86_64::{VirtAddr, instructions::tlb::flush}; use x86_64::{instructions::tlb::flush, VirtAddr};
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9); let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
flush(addr); flush(addr);
} }
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) } fn accessed(&self) -> bool {
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) } self.0.flags().contains(EF::ACCESSED)
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) } }
fn present(&self) -> bool { self.0.flags().contains(EF::PRESENT) } fn dirty(&self) -> bool {
fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); } self.0.flags().contains(EF::DIRTY)
fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); } }
fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); } fn writable(&self) -> bool {
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::PRESENT, value); } self.0.flags().contains(EF::WRITABLE)
fn target(&self) -> usize { self.0.addr().as_u64() as usize } }
fn present(&self) -> bool {
self.0.flags().contains(EF::PRESENT)
}
fn clear_accessed(&mut self) {
self.as_flags().remove(EF::ACCESSED);
}
fn clear_dirty(&mut self) {
self.as_flags().remove(EF::DIRTY);
}
fn set_writable(&mut self, value: bool) {
self.as_flags().set(EF::WRITABLE, value);
}
fn set_present(&mut self, value: bool) {
self.as_flags().set(EF::PRESENT, value);
}
fn target(&self) -> usize {
self.0.addr().as_u64() as usize
}
fn set_target(&mut self, target: usize) { fn set_target(&mut self, target: usize) {
let flags = self.0.flags(); let flags = self.0.flags();
self.0.set_addr(PhysAddr::new(target as u64), flags); self.0.set_addr(PhysAddr::new(target as u64), flags);
} }
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::BIT_10) } fn writable_shared(&self) -> bool {
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::BIT_9) } self.0.flags().contains(EF::BIT_10)
}
fn readonly_shared(&self) -> bool {
self.0.flags().contains(EF::BIT_9)
}
fn set_shared(&mut self, writable: bool) { fn set_shared(&mut self, writable: bool) {
let flags = self.as_flags(); let flags = self.as_flags();
flags.set(EF::BIT_10, writable); flags.set(EF::BIT_10, writable);
flags.set(EF::BIT_9, !writable); flags.set(EF::BIT_9, !writable);
} }
fn clear_shared(&mut self) { self.as_flags().remove(EF::BIT_9 | EF::BIT_10); } fn clear_shared(&mut self) {
fn swapped(&self) -> bool { self.0.flags().contains(EF::BIT_11) } self.as_flags().remove(EF::BIT_9 | EF::BIT_10);
fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::BIT_11, value); } }
fn user(&self) -> bool { self.0.flags().contains(EF::USER_ACCESSIBLE) } fn swapped(&self) -> bool {
self.0.flags().contains(EF::BIT_11)
}
fn set_swapped(&mut self, value: bool) {
self.as_flags().set(EF::BIT_11, value);
}
fn user(&self) -> bool {
self.0.flags().contains(EF::USER_ACCESSIBLE)
}
fn set_user(&mut self, value: bool) { fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::USER_ACCESSIBLE, value); self.as_flags().set(EF::USER_ACCESSIBLE, value);
if value { if value {
@ -122,10 +159,16 @@ impl Entry for PageEntry {
} }
} }
} }
fn execute(&self) -> bool { !self.0.flags().contains(EF::NO_EXECUTE) } fn execute(&self) -> bool {
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); } !self.0.flags().contains(EF::NO_EXECUTE)
fn mmio(&self) -> u8 { 0 } }
fn set_mmio(&mut self, _value: u8) { } fn set_execute(&mut self, value: bool) {
self.as_flags().set(EF::NO_EXECUTE, !value);
}
fn mmio(&self) -> u8 {
0
}
fn set_mmio(&mut self, _value: u8) {}
} }
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry { fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
@ -176,7 +219,10 @@ impl InactivePageTable for InactivePageTable0 {
} }
unsafe fn set_token(token: usize) { unsafe fn set_token(token: usize) {
Cr3::write(Frame::containing_address(PhysAddr::new(token as u64)), Cr3Flags::empty()); Cr3::write(
Frame::containing_address(PhysAddr::new(token as u64)),
Cr3Flags::empty(),
);
} }
fn active_token() -> usize { fn active_token() -> usize {
@ -52,8 +52,16 @@ pub fn backtrace() {
let mut current_pc = lr(); let mut current_pc = lr();
let mut current_fp = fp(); let mut current_fp = fp();
let mut stack_num = 0; let mut stack_num = 0;
while current_pc >= stext as usize && current_pc <= etext as usize && current_fp as usize != 0 { while current_pc >= stext as usize
println!("#{} {:#018X} fp {:#018X}", stack_num, current_pc - size_of::<usize>(), current_fp); && current_pc <= etext as usize
&& current_fp as usize != 0
{
println!(
"#{} {:#018X} fp {:#018X}",
stack_num,
current_pc - size_of::<usize>(),
current_fp
);
stack_num = stack_num + 1; stack_num = stack_num + 1;
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
{ {
@ -72,8 +80,9 @@ pub fn backtrace() {
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate) // Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
// size = 512 pages // size = 512 pages
current_fp = *(current_fp as *const usize).offset(0); current_fp = *(current_fp as *const usize).offset(0);
if current_fp >= 0x0000_57ac_0000_0000 + 512 * PAGE_SIZE - size_of::<usize>() && if current_fp >= 0x0000_57ac_0000_0000 + 512 * PAGE_SIZE - size_of::<usize>()
current_fp <= 0xffff_ff00_0000_0000 { && current_fp <= 0xffff_ff00_0000_0000
{
break; break;
} }
current_pc = *(current_fp as *const usize).offset(1); current_pc = *(current_fp as *const usize).offset(1);
@ -1,5 +1,5 @@
use core::slice;
use alloc::string::String; use alloc::string::String;
use core::slice;
use device_tree::{DeviceTree, Node}; use device_tree::{DeviceTree, Node};
@ -1,7 +1,7 @@
//! Implement Device //! Implement Device
use spin::RwLock;
use rcore_fs::dev::*; use rcore_fs::dev::*;
use spin::RwLock;
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
use crate::arch::driver::ide; use crate::arch::driver::ide;
@ -9,9 +9,12 @@ use crate::arch::driver::ide;
pub struct MemBuf(RwLock<&'static mut [u8]>); pub struct MemBuf(RwLock<&'static mut [u8]>);
impl MemBuf { impl MemBuf {
pub unsafe fn new(begin: unsafe extern fn(), end: unsafe extern fn()) -> Self { pub unsafe fn new(begin: unsafe extern "C" fn(), end: unsafe extern "C" fn()) -> Self {
use core::slice; use core::slice;
MemBuf(RwLock::new(slice::from_raw_parts_mut(begin as *mut u8, end as usize - begin as usize))) MemBuf(RwLock::new(slice::from_raw_parts_mut(
begin as *mut u8,
end as usize - begin as usize,
)))
} }
} }
@ -36,7 +39,8 @@ impl BlockDevice for ide::IDE {
fn read_at(&self, block_id: usize, buf: &mut [u8]) -> bool { fn read_at(&self, block_id: usize, buf: &mut [u8]) -> bool {
use core::slice; use core::slice;
assert!(buf.len() >= ide::BLOCK_SIZE); assert!(buf.len() >= ide::BLOCK_SIZE);
let buf = unsafe { slice::from_raw_parts_mut(buf.as_ptr() as *mut u32, ide::BLOCK_SIZE / 4) }; let buf =
unsafe { slice::from_raw_parts_mut(buf.as_ptr() as *mut u32, ide::BLOCK_SIZE / 4) };
self.read(block_id as u64, 1, buf).is_ok() self.read(block_id as u64, 1, buf).is_ok()
} }
fn write_at(&self, block_id: usize, buf: &[u8]) -> bool { fn write_at(&self, block_id: usize, buf: &[u8]) -> bool {
@ -2,7 +2,7 @@
use alloc::{string::String, sync::Arc}; use alloc::{string::String, sync::Arc};
use rcore_fs::vfs::{Metadata, INode, Result, FsError}; use rcore_fs::vfs::{FsError, INode, Metadata, Result};
#[derive(Clone)] #[derive(Clone)]
pub struct FileHandle { pub struct FileHandle {
@ -7,24 +7,28 @@ use rcore_fs_sfs::SimpleFileSystem;
use crate::arch::driver::ide; use crate::arch::driver::ide;
pub use self::file::*; pub use self::file::*;
pub use self::stdio::{STDIN, STDOUT};
pub use self::pipe::Pipe; pub use self::pipe::Pipe;
pub use self::stdio::{STDIN, STDOUT};
mod file;
mod stdio;
mod device; mod device;
mod file;
mod pipe; mod pipe;
mod stdio;
/// Hard link user programs /// Hard link user programs
#[cfg(feature = "link_user")] #[cfg(feature = "link_user")]
global_asm!(concat!(r#" global_asm!(concat!(
r#"
.section .data .section .data
.global _user_img_start .global _user_img_start
.global _user_img_end .global _user_img_end
_user_img_start: _user_img_start:
.incbin ""#, env!("SFSIMG"), r#"" .incbin ""#,
env!("SFSIMG"),
r#""
_user_img_end: _user_img_end:
"#)); "#
));
lazy_static! { lazy_static! {
/// The root of file system /// The root of file system
@ -66,7 +70,9 @@ impl INodeExt for INode {
fn read_as_vec(&self) -> Result<Vec<u8>> { fn read_as_vec(&self) -> Result<Vec<u8>> {
let size = self.metadata()?.size; let size = self.metadata()?.size;
let mut buf = Vec::with_capacity(size); let mut buf = Vec::with_capacity(size);
unsafe { buf.set_len(size); } unsafe {
buf.set_len(size);
}
self.read_at(0, buf.as_mut_slice())?; self.read_at(0, buf.as_mut_slice())?;
Ok(buf) Ok(buf)
} }
@ -67,16 +67,20 @@ impl INode for Stdin {
buf[0] = self.pop() as u8; buf[0] = self.pop() as u8;
Ok(1) Ok(1)
} }
fn write_at(&self, _offset: usize, _buf: &[u8]) -> Result<usize> { unimplemented!() } fn write_at(&self, _offset: usize, _buf: &[u8]) -> Result<usize> {
unimplemented!()
}
impl_inode!(); impl_inode!();
} }
impl INode for Stdout { impl INode for Stdout {
fn read_at(&self, _offset: usize, _buf: &mut [u8]) -> Result<usize> { unimplemented!() } fn read_at(&self, _offset: usize, _buf: &mut [u8]) -> Result<usize> {
unimplemented!()
}
fn write_at(&self, _offset: usize, buf: &[u8]) -> Result<usize> { fn write_at(&self, _offset: usize, buf: &[u8]) -> Result<usize> {
use core::str; use core::str;
//we do not care about the utf-8 things, we just want to print it! //we do not care about the utf-8 things, we just want to print it!
let s = unsafe{ str::from_utf8_unchecked(buf) }; let s = unsafe { str::from_utf8_unchecked(buf) };
print!("{}", s); print!("{}", s);
Ok(buf.len()) Ok(buf.len())
} }
@ -1,19 +1,20 @@
// Rust language features implementations // Rust language features implementations
use core::panic::PanicInfo; use crate::backtrace;
use core::alloc::Layout; use core::alloc::Layout;
use core::panic::PanicInfo;
use log::*; use log::*;
use crate::backtrace;
#[lang = "eh_personality"] #[lang = "eh_personality"]
extern fn eh_personality() { extern "C" fn eh_personality() {}
}
#[panic_handler] #[panic_handler]
fn panic(info: &PanicInfo) -> ! { fn panic(info: &PanicInfo) -> ! {
error!("\n\n{}", info); error!("\n\n{}", info);
backtrace::backtrace(); backtrace::backtrace();
loop { crate::arch::cpu::halt() } loop {
crate::arch::cpu::halt()
}
} }
#[lang = "oom"] #[lang = "oom"]
@ -16,25 +16,25 @@ extern crate log;
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
pub use crate::process::{processor, new_kernel_context}; pub use crate::process::{new_kernel_context, processor};
use rcore_thread::std_thread as thread;
use buddy_system_allocator::LockedHeap; use buddy_system_allocator::LockedHeap;
use rcore_thread::std_thread as thread;
#[macro_use] // print! #[macro_use] // print!
mod logging; mod logging;
mod memory; mod backtrace;
mod lang;
mod util;
mod consts; mod consts;
mod process; mod drivers;
mod syscall;
mod fs; mod fs;
mod lang;
mod memory;
mod net;
mod process;
mod shell;
mod sync; mod sync;
mod syscall;
mod trap; mod trap;
mod shell; mod util;
mod drivers;
mod net;
mod backtrace;
#[allow(dead_code)] #[allow(dead_code)]
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@ -64,12 +64,14 @@ impl Log for SimpleLogger {
true true
} }
fn log(&self, record: &Record) { fn log(&self, record: &Record) {
static DISABLED_TARGET: &[&str] = &[ static DISABLED_TARGET: &[&str] = &[];
];
if self.enabled(record.metadata()) && !DISABLED_TARGET.contains(&record.target()) { if self.enabled(record.metadata()) && !DISABLED_TARGET.contains(&record.target()) {
// let target = record.target(); // let target = record.target();
// let begin = target.as_bytes().iter().rposition(|&c| c == b':').map(|i| i + 1).unwrap_or(0); // let begin = target.as_bytes().iter().rposition(|&c| c == b':').map(|i| i + 1).unwrap_or(0);
print_in_color(format_args!("[{:>5}] {}\n", record.level(), record.args()), ConsoleColor::from(record.level())); print_in_color(
format_args!("[{:>5}] {}\n", record.level(), record.args()),
ConsoleColor::from(record.level()),
);
} }
} }
fn flush(&self) {} fn flush(&self) {}
@ -1,14 +1,14 @@
use super::HEAP_ALLOCATOR;
pub use crate::arch::paging::*; pub use crate::arch::paging::*;
use bit_allocator::BitAlloc;
use crate::consts::MEMORY_OFFSET; use crate::consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use rcore_memory::*;
pub use rcore_memory::memory_set::{MemoryArea, MemoryAttr, handler::*};
use crate::process::process_unsafe; use crate::process::process_unsafe;
use crate::sync::SpinNoIrqLock; use crate::sync::SpinNoIrqLock;
use bit_allocator::BitAlloc;
use buddy_system_allocator::LockedHeap;
use lazy_static::*; use lazy_static::*;
use log::*; use log::*;
use buddy_system_allocator::LockedHeap; pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr};
use rcore_memory::*;
pub type MemorySet = rcore_memory::memory_set::MemorySet<InactivePageTable0>; pub type MemorySet = rcore_memory::memory_set::MemorySet<InactivePageTable0>;
@ -25,7 +25,8 @@ pub type FrameAlloc = bit_allocator::BitAlloc4K;
pub type FrameAlloc = bit_allocator::BitAlloc1M; pub type FrameAlloc = bit_allocator::BitAlloc1M;
lazy_static! { lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> = SpinNoIrqLock::new(FrameAlloc::default()); pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> =
SpinNoIrqLock::new(FrameAlloc::default());
} }
/// The only way to get active page table /// The only way to get active page table
@ -46,21 +47,25 @@ pub fn active_table() -> ActivePageTable {
unsafe { ActivePageTable::new() } unsafe { ActivePageTable::new() }
} }
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub struct GlobalFrameAlloc; pub struct GlobalFrameAlloc;
impl FrameAllocator for GlobalFrameAlloc { impl FrameAllocator for GlobalFrameAlloc {
fn alloc(&self) -> Option<usize> { fn alloc(&self) -> Option<usize> {
// get the real address of the alloc frame // get the real address of the alloc frame
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET); let ret = FRAME_ALLOCATOR
.lock()
.alloc()
.map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret); trace!("Allocate frame: {:x?}", ret);
ret ret
// TODO: try to swap out when alloc failed // TODO: try to swap out when alloc failed
} }
fn dealloc(&self, target: usize) { fn dealloc(&self, target: usize) {
trace!("Deallocate frame: {:x}", target); trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE); FRAME_ALLOCATOR
.lock()
.dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
} }
} }
@ -77,7 +82,8 @@ const STACK_SIZE: usize = 0x8000;
impl KernelStack { impl KernelStack {
pub fn new() -> Self { pub fn new() -> Self {
use alloc::alloc::{alloc, Layout}; use alloc::alloc::{alloc, Layout};
let bottom = unsafe{ alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize; let bottom =
unsafe { alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize;
KernelStack(bottom) KernelStack(bottom)
} }
pub fn top(&self) -> usize { pub fn top(&self) -> usize {
@ -88,26 +94,32 @@ impl KernelStack {
impl Drop for KernelStack { impl Drop for KernelStack {
fn drop(&mut self) { fn drop(&mut self) {
use alloc::alloc::{dealloc, Layout}; use alloc::alloc::{dealloc, Layout};
unsafe{ dealloc(self.0 as _, Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()); } unsafe {
dealloc(
self.0 as _,
Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap(),
);
}
} }
} }
/// Handle page fault at `addr`. /// Handle page fault at `addr`.
/// Return true to continue, false to halt. /// Return true to continue, false to halt.
pub fn handle_page_fault(addr: usize) -> bool { pub fn handle_page_fault(addr: usize) -> bool {
debug!("page fault @ {:#x}", addr); debug!("page fault @ {:#x}", addr);
// This is safe as long as page fault never happens in page fault handler // This is safe as long as page fault never happens in page fault handler
unsafe { unsafe { process_unsafe().vm.handle_page_fault(addr) }
process_unsafe().vm.handle_page_fault(addr)
}
} }
pub fn init_heap() { pub fn init_heap() {
use crate::consts::KERNEL_HEAP_SIZE; use crate::consts::KERNEL_HEAP_SIZE;
static mut HEAP: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE]; static mut HEAP: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
unsafe { HEAP_ALLOCATOR.lock().init(HEAP.as_ptr() as usize, KERNEL_HEAP_SIZE); } unsafe {
HEAP_ALLOCATOR
.lock()
.init(HEAP.as_ptr() as usize, KERNEL_HEAP_SIZE);
}
info!("heap init end"); info!("heap init end");
} }
@ -1,8 +1,8 @@
use alloc::sync::Arc;
use crate::arch::rand; use crate::arch::rand;
use crate::drivers::{NET_DRIVERS, SOCKET_ACTIVITY}; use crate::drivers::{NET_DRIVERS, SOCKET_ACTIVITY};
use crate::sync::SpinNoIrqLock as Mutex; use crate::sync::SpinNoIrqLock as Mutex;
use crate::syscall::*; use crate::syscall::*;
use alloc::sync::Arc;
use smoltcp::socket::*; use smoltcp::socket::*;
use smoltcp::wire::*; use smoltcp::wire::*;
@ -1,6 +1,6 @@
use alloc::collections::btree_map::BTreeMap;
use alloc::string::String; use alloc::string::String;
use alloc::vec::Vec; use alloc::vec::Vec;
use alloc::collections::btree_map::BTreeMap;
use core::ptr::null; use core::ptr::null;
pub struct ProcInitInfo { pub struct ProcInitInfo {
@ -16,17 +16,25 @@ impl ProcInitInfo {
// program name // program name
writer.push_str(&self.args[0]); writer.push_str(&self.args[0]);
// environment strings // environment strings
let envs: Vec<_> = self.envs.iter().map(|(key, value)| { let envs: Vec<_> = self
.envs
.iter()
.map(|(key, value)| {
writer.push_str(value.as_str()); writer.push_str(value.as_str());
writer.push_slice(&[b"="]); writer.push_slice(&[b"="]);
writer.push_slice(key.as_bytes()); writer.push_slice(key.as_bytes());
writer.sp writer.sp
}).collect(); })
.collect();
// argv strings // argv strings
let argv: Vec<_> = self.args.iter().map(|arg| { let argv: Vec<_> = self
.args
.iter()
.map(|arg| {
writer.push_str(arg.as_str()); writer.push_str(arg.as_str());
writer.sp writer.sp
}).collect(); })
.collect();
// auxiliary vector entries // auxiliary vector entries
writer.push_slice(&[null::<u8>(), null::<u8>()]); writer.push_slice(&[null::<u8>(), null::<u8>()]);
for (&type_, &value) in self.auxv.iter() { for (&type_, &value) in self.auxv.iter() {
@ -50,11 +58,13 @@ struct StackWriter {
impl StackWriter { impl StackWriter {
fn push_slice<T: Copy>(&mut self, vs: &[T]) { fn push_slice<T: Copy>(&mut self, vs: &[T]) {
use core::{mem::{size_of, align_of}, slice}; use core::{
mem::{align_of, size_of},
slice,
};
self.sp -= vs.len() * size_of::<T>(); self.sp -= vs.len() * size_of::<T>();
self.sp -= self.sp % align_of::<T>(); self.sp -= self.sp % align_of::<T>();
unsafe { slice::from_raw_parts_mut(self.sp as *mut T, vs.len()) } unsafe { slice::from_raw_parts_mut(self.sp as *mut T, vs.len()) }.copy_from_slice(vs);
.copy_from_slice(vs);
} }
fn push_str(&mut self, s: &str) { fn push_str(&mut self, s: &str) {
self.push_slice(&[b'\0']); self.push_slice(&[b'\0']);
@ -1,13 +1,13 @@
pub use self::structs::*; pub use self::structs::*;
pub use rcore_thread::*;
use crate::consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
use crate::arch::cpu; use crate::arch::cpu;
use crate::consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
use alloc::{boxed::Box, sync::Arc}; use alloc::{boxed::Box, sync::Arc};
use spin::MutexGuard;
use log::*; use log::*;
pub use rcore_thread::*;
use spin::MutexGuard;
pub mod structs;
mod abi; mod abi;
pub mod structs;
pub fn init() { pub fn init() {
// NOTE: max_time_slice <= 5 to ensure 'priority' test pass // NOTE: max_time_slice <= 5 to ensure 'priority' test pass
@ -25,7 +25,16 @@ pub fn init() {
info!("process: init end"); info!("process: init end");
} }
static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()]; static PROCESSORS: [Processor; MAX_CPU_NUM] = [
Processor::new(),
Processor::new(),
Processor::new(),
Processor::new(),
Processor::new(),
Processor::new(),
Processor::new(),
Processor::new(),
];
/// Get current process /// Get current process
pub fn process() -> MutexGuard<'static, Process> { pub fn process() -> MutexGuard<'static, Process> {
@ -45,13 +54,10 @@ pub unsafe fn process_unsafe() -> MutexGuard<'static, Process> {
/// FIXME: It's obviously unsafe to get &mut ! /// FIXME: It's obviously unsafe to get &mut !
pub fn current_thread() -> &'static mut Thread { pub fn current_thread() -> &'static mut Thread {
use core::mem::transmute; use core::mem::transmute;
let (process, _): (&mut Thread, *const ()) = unsafe { let (process, _): (&mut Thread, *const ()) = unsafe { transmute(processor().context()) };
transmute(processor().context())
};
process process
} }
// Implement dependencies for std::thread // Implement dependencies for std::thread
#[no_mangle] #[no_mangle]
@ -60,6 +66,6 @@ pub fn processor() -> &'static Processor {
} }
#[no_mangle] #[no_mangle]
pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> { pub fn new_kernel_context(entry: extern "C" fn(usize) -> !, arg: usize) -> Box<Context> {
Thread::new_kernel(entry, arg) Thread::new_kernel(entry, arg)
} }
@ -1,18 +1,22 @@
use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, vec::Vec, sync::Weak}; use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, sync::Weak, vec::Vec};
use core::fmt; use core::fmt;
use core::str;
use log::*; use log::*;
use spin::{Mutex, RwLock};
use xmas_elf::{ElfFile, header, program::{Flags, Type, SegmentData}};
use rcore_memory::PAGE_SIZE; use rcore_memory::PAGE_SIZE;
use rcore_thread::Tid; use rcore_thread::Tid;
use core::str; use spin::{Mutex, RwLock};
use xmas_elf::{
header,
program::{Flags, SegmentData, Type},
ElfFile,
};
use crate::arch::interrupt::{Context, TrapFrame}; use crate::arch::interrupt::{Context, TrapFrame};
use crate::fs::{FileHandle, INodeExt, OpenOptions, FOLLOW_MAX_DEPTH};
use crate::memory::{ByFrame, GlobalFrameAlloc, KernelStack, MemoryAttr, MemorySet}; use crate::memory::{ByFrame, GlobalFrameAlloc, KernelStack, MemoryAttr, MemorySet};
use crate::fs::{FileHandle, OpenOptions, INodeExt, FOLLOW_MAX_DEPTH};
use crate::sync::Condvar;
use crate::net::{SocketWrapper, SOCKETS}; use crate::net::{SocketWrapper, SOCKETS};
use crate::sync::Condvar;
use super::abi::{self, ProcInitInfo}; use super::abi::{self, ProcInitInfo};
@ -26,20 +30,17 @@ pub struct Thread {
pub proc: Arc<Mutex<Process>>, pub proc: Arc<Mutex<Process>>,
} }
#[derive(Clone)] #[derive(Clone)]
pub enum FileLike { pub enum FileLike {
File(FileHandle), File(FileHandle),
Socket(SocketWrapper) Socket(SocketWrapper),
} }
impl fmt::Debug for FileLike { impl fmt::Debug for FileLike {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self { match self {
FileLike::File(_) => write!(f, "File"), FileLike::File(_) => write!(f, "File"),
FileLike::Socket(wrapper) => { FileLike::Socket(wrapper) => write!(f, "{:?}", wrapper),
write!(f, "{:?}", wrapper)
},
} }
} }
} }
@ -104,7 +105,8 @@ pub struct Process {
/// Records the mapping between pid and Process struct. /// Records the mapping between pid and Process struct.
lazy_static! { lazy_static! {
pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> = RwLock::new(BTreeMap::new()); pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> =
RwLock::new(BTreeMap::new());
} }
/// Let `rcore_thread` can switch between our `Thread` /// Let `rcore_thread` can switch between our `Thread`
@ -128,7 +130,9 @@ impl rcore_thread::Context for Thread {
} }
// add it to threads // add it to threads
proc.threads.push(tid); proc.threads.push(tid);
PROCESSES.write().insert(proc.pid.get(), Arc::downgrade(&self.proc)); PROCESSES
.write()
.insert(proc.pid.get(), Arc::downgrade(&self.proc));
} }
} }
@ -156,7 +160,7 @@ impl Thread {
} }
/// Make a new kernel thread starting from `entry` with `arg` /// Make a new kernel thread starting from `entry` with `arg`
pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Thread> { pub fn new_kernel(entry: extern "C" fn(usize) -> !, arg: usize) -> Box<Thread> {
let vm = MemorySet::new(); let vm = MemorySet::new();
let kstack = KernelStack::new(); let kstack = KernelStack::new();
Box::new(Thread { Box::new(Thread {
@ -174,14 +178,15 @@ impl Thread {
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
child_exit_code: BTreeMap::new() child_exit_code: BTreeMap::new(),
})), })),
}) })
} }
/// Make a new user process from ELF `data` /// Make a new user process from ELF `data`
pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<Thread> pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<Thread>
where Iter: Iterator<Item=&'a str> where
Iter: Iterator<Item = &'a str>,
{ {
// Parse ELF // Parse ELF
let elf = ElfFile::new(data).expect("failed to read elf"); let elf = ElfFile::new(data).expect("failed to read elf");
@ -192,8 +197,8 @@ impl Thread {
// Check ELF type // Check ELF type
match elf.header.pt2.type_().as_type() { match elf.header.pt2.type_().as_type() {
header::Type::Executable => {}, header::Type::Executable => {}
header::Type::SharedObject => {}, header::Type::SharedObject => {}
_ => panic!("ELF is not executable or shared object"), _ => panic!("ELF is not executable or shared object"),
} }
@ -220,13 +225,19 @@ impl Thread {
let mut vm = elf.make_memory_set(); let mut vm = elf.make_memory_set();
// User stack // User stack
use crate::consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET}; use crate::consts::{USER32_STACK_OFFSET, USER_STACK_OFFSET, USER_STACK_SIZE};
let mut ustack_top = { let mut ustack_top = {
let (ustack_buttom, ustack_top) = match is32 { let (ustack_buttom, ustack_top) = match is32 {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE), true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE), false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
}; };
vm.push(ustack_buttom, ustack_top, MemoryAttr::default().user(), ByFrame::new(GlobalFrameAlloc), "user_stack"); vm.push(
ustack_buttom,
ustack_top,
MemoryAttr::default().user(),
ByFrame::new(GlobalFrameAlloc),
"user_stack",
);
ustack_top ustack_top
}; };
@ -246,7 +257,7 @@ impl Thread {
}, },
}; };
unsafe { unsafe {
vm.with(|| { ustack_top = init_info.push_at(ustack_top) }); vm.with(|| ustack_top = init_info.push_at(ustack_top));
} }
trace!("{:#x?}", vm); trace!("{:#x?}", vm);
@ -254,16 +265,45 @@ impl Thread {
let kstack = KernelStack::new(); let kstack = KernelStack::new();
let mut files = BTreeMap::new(); let mut files = BTreeMap::new();
files.insert(0, FileLike::File(FileHandle::new(crate::fs::STDIN.clone(), OpenOptions { read: true, write: false, append: false }))); files.insert(
files.insert(1, FileLike::File(FileHandle::new(crate::fs::STDOUT.clone(), OpenOptions { read: false, write: true, append: false }))); 0,
files.insert(2, FileLike::File(FileHandle::new(crate::fs::STDOUT.clone(), OpenOptions { read: false, write: true, append: false }))); FileLike::File(FileHandle::new(
crate::fs::STDIN.clone(),
OpenOptions {
read: true,
write: false,
append: false,
},
)),
);
files.insert(
1,
FileLike::File(FileHandle::new(
crate::fs::STDOUT.clone(),
OpenOptions {
read: false,
write: true,
append: false,
},
)),
);
files.insert(
2,
FileLike::File(FileHandle::new(
crate::fs::STDOUT.clone(),
OpenOptions {
read: false,
write: true,
append: false,
},
)),
);
let entry_addr = elf.header.pt2.entry_point() as usize; let entry_addr = elf.header.pt2.entry_point() as usize;
Box::new(Thread { Box::new(Thread {
context: unsafe { context: unsafe {
Context::new_user_thread( Context::new_user_thread(entry_addr, ustack_top, kstack.top(), is32, vm.token())
entry_addr, ustack_top, kstack.top(), is32, vm.token())
}, },
kstack, kstack,
clear_child_tid: 0, clear_child_tid: 0,
@ -277,7 +317,7 @@ impl Thread {
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
child_exit_code: BTreeMap::new() child_exit_code: BTreeMap::new(),
})), })),
}) })
} }
@ -297,9 +337,7 @@ impl Thread {
// NoMMU: copying data has been done in `vm.clone()` // NoMMU: copying data has been done in `vm.clone()`
for area in vm.iter() { for area in vm.iter() {
let data = Vec::<u8>::from(unsafe { area.as_slice() }); let data = Vec::<u8>::from(unsafe { area.as_slice() });
unsafe { vm.with(|| { unsafe { vm.with(|| area.as_slice_mut().copy_from_slice(data.as_slice())) }
area.as_slice_mut().copy_from_slice(data.as_slice())
}) }
} }
debug!("fork: temporary copy data!"); debug!("fork: temporary copy data!");
@ -326,13 +364,19 @@ impl Thread {
children: Vec::new(), children: Vec::new(),
threads: Vec::new(), threads: Vec::new(),
child_exit: Arc::new(Condvar::new()), child_exit: Arc::new(Condvar::new()),
child_exit_code: BTreeMap::new() child_exit_code: BTreeMap::new(),
})), })),
}) })
} }
/// Create a new thread in the same process. /// Create a new thread in the same process.
pub fn clone(&self, tf: &TrapFrame, stack_top: usize, tls: usize, clear_child_tid: usize) -> Box<Thread> { pub fn clone(
&self,
tf: &TrapFrame,
stack_top: usize,
tls: usize,
clear_child_tid: usize,
) -> Box<Thread> {
let kstack = KernelStack::new(); let kstack = KernelStack::new();
let token = self.proc.lock().vm.token(); let token = self.proc.lock().vm.token();
Box::new(Thread { Box::new(Thread {
@ -371,7 +415,9 @@ impl ToMemoryAttr for Flags {
fn to_attr(&self) -> MemoryAttr { fn to_attr(&self) -> MemoryAttr {
let mut flags = MemoryAttr::default().user(); let mut flags = MemoryAttr::default().user();
// FIXME: handle readonly // FIXME: handle readonly
if self.is_execute() { flags = flags.execute(); } if self.is_execute() {
flags = flags.execute();
}
flags flags
} }
} }
@ -406,7 +452,13 @@ impl ElfExt for ElfFile<'_> {
// Get target slice // Get target slice
let target = { let target = {
ms.push(virt_addr, virt_addr + mem_size, ph.flags().to_attr(), ByFrame::new(GlobalFrameAlloc), ""); ms.push(
virt_addr,
virt_addr + mem_size,
ph.flags().to_attr(),
ByFrame::new(GlobalFrameAlloc),
"",
);
unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) } unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
}; };
// Copy data // Copy data
@ -423,29 +475,34 @@ impl ElfExt for ElfFile<'_> {
} }
fn get_interpreter(&self) -> Result<&str, &str> { fn get_interpreter(&self) -> Result<&str, &str> {
let header = self.program_iter() let header = self
.program_iter()
.filter(|ph| ph.get_type() == Ok(Type::Interp)) .filter(|ph| ph.get_type() == Ok(Type::Interp))
.next().ok_or("no interp header")?; .next()
.ok_or("no interp header")?;
let mut data = match header.get_data(self)? { let mut data = match header.get_data(self)? {
SegmentData::Undefined(data) => data, SegmentData::Undefined(data) => data,
_ => unreachable!(), _ => unreachable!(),
}; };
// skip NULL // skip NULL
while let Some(0) = data.last() { while let Some(0) = data.last() {
data = &data[..data.len()-1]; data = &data[..data.len() - 1];
} }
let path = str::from_utf8(data) let path = str::from_utf8(data).map_err(|_| "failed to convert to utf8")?;
.map_err(|_| "failed to convert to utf8")?;
Ok(path) Ok(path)
} }
fn get_phdr_vaddr(&self) -> Option<u64> { fn get_phdr_vaddr(&self) -> Option<u64> {
if let Some(phdr) = self.program_iter() if let Some(phdr) = self
.find(|ph| ph.get_type() == Ok(Type::Phdr)) { .program_iter()
.find(|ph| ph.get_type() == Ok(Type::Phdr))
{
// if phdr exists in program header, use it // if phdr exists in program header, use it
Some(phdr.virtual_addr()) Some(phdr.virtual_addr())
} else if let Some(elf_addr) = self.program_iter() } else if let Some(elf_addr) = self
.find(|ph| ph.get_type() == Ok(Type::Load) && ph.offset() == 0) { .program_iter()
.find(|ph| ph.get_type() == Ok(Type::Load) && ph.offset() == 0)
{
// otherwise, check if elf is loaded from the beginning, then phdr can be inferred. // otherwise, check if elf is loaded from the beginning, then phdr can be inferred.
Some(elf_addr.virtual_addr() + self.header.pt2.ph_offset()) Some(elf_addr.virtual_addr() + self.header.pt2.ph_offset())
} else { } else {
@ -1,16 +1,18 @@
//! Kernel shell //! Kernel shell
use crate::drivers::CMDLINE;
use crate::fs::{INodeExt, ROOT_INODE};
use crate::process::*;
use alloc::string::String; use alloc::string::String;
use alloc::vec::Vec; use alloc::vec::Vec;
use crate::fs::{ROOT_INODE, INodeExt};
use crate::process::*;
use crate::drivers::CMDLINE;
#[cfg(not(feature = "run_cmdline"))] #[cfg(not(feature = "run_cmdline"))]
pub fn run_user_shell() { pub fn run_user_shell() {
if let Ok(inode) = ROOT_INODE.lookup("rust/sh") { if let Ok(inode) = ROOT_INODE.lookup("rust/sh") {
let data = inode.read_as_vec().unwrap(); let data = inode.read_as_vec().unwrap();
processor().manager().add(Thread::new_user(data.as_slice(), "sh".split(' '))); processor()
.manager()
.add(Thread::new_user(data.as_slice(), "sh".split(' ')));
} else { } else {
processor().manager().add(Thread::new_kernel(shell, 0)); processor().manager().add(Thread::new_kernel(shell, 0));
} }
@ -21,10 +23,12 @@ pub fn run_user_shell() {
let cmdline = CMDLINE.read(); let cmdline = CMDLINE.read();
let inode = ROOT_INODE.lookup(&cmdline).unwrap(); let inode = ROOT_INODE.lookup(&cmdline).unwrap();
let data = inode.read_as_vec().unwrap(); let data = inode.read_as_vec().unwrap();
processor().manager().add(Thread::new_user(data.as_slice(), cmdline.split(' '))); processor()
.manager()
.add(Thread::new_user(data.as_slice(), cmdline.split(' ')));
} }
pub extern fn shell(_arg: usize) -> ! { pub extern "C" fn shell(_arg: usize) -> ! {
let files = ROOT_INODE.list().unwrap(); let files = ROOT_INODE.list().unwrap();
println!("Available programs: {:?}", files); println!("Available programs: {:?}", files);
let mut history = Vec::new(); let mut history = Vec::new();
@ -38,7 +42,9 @@ pub extern fn shell(_arg: usize) -> ! {
let name = cmd.trim().split(' ').next().unwrap(); let name = cmd.trim().split(' ').next().unwrap();
if let Ok(file) = ROOT_INODE.lookup(name) { if let Ok(file) = ROOT_INODE.lookup(name) {
let data = file.read_as_vec().unwrap(); let data = file.read_as_vec().unwrap();
let _pid = processor().manager().add(Thread::new_user(data.as_slice(), cmd.split(' '))); let _pid = processor()
.manager()
.add(Thread::new_user(data.as_slice(), cmd.split(' ')));
// TODO: wait until process exits, or use user land shell completely // TODO: wait until process exits, or use user land shell completely
//unsafe { thread::JoinHandle::<()>::_of(pid) }.join().unwrap(); //unsafe { thread::JoinHandle::<()>::_of(pid) }.join().unwrap();
} else { } else {
@ -1,6 +1,6 @@
use alloc::collections::VecDeque;
use super::*; use super::*;
use crate::thread; use crate::thread;
use alloc::collections::VecDeque;
use alloc::sync::Arc; use alloc::sync::Arc;
use alloc::vec::Vec; use alloc::vec::Vec;
@ -47,7 +47,8 @@ impl Condvar {
} }
pub fn wait<'a, T, S>(&self, guard: MutexGuard<'a, T, S>) -> MutexGuard<'a, T, S> pub fn wait<'a, T, S>(&self, guard: MutexGuard<'a, T, S>) -> MutexGuard<'a, T, S>
where S: MutexSupport where
S: MutexSupport,
{ {
let mutex = guard.mutex; let mutex = guard.mutex;
drop(guard); drop(guard);

@ -54,8 +54,8 @@ pub use self::condvar::*;
pub use self::mutex::*; pub use self::mutex::*;
pub use self::semaphore::*; pub use self::semaphore::*;
mod mutex;
mod condvar; mod condvar;
mod semaphore;
pub mod mpsc; pub mod mpsc;
mod mutex;
mod semaphore;
pub mod test; pub mod test;
@ -1,6 +1,6 @@
use alloc::{sync::Arc, sync::Weak, collections::VecDeque};
use super::Condvar; use super::Condvar;
use super::SpinLock as Mutex; use super::SpinLock as Mutex;
use alloc::{collections::VecDeque, sync::Arc, sync::Weak};
struct Channel<T> { struct Channel<T> {
deque: Mutex<VecDeque<T>>, deque: Mutex<VecDeque<T>>,
@ -26,7 +26,7 @@ pub struct Receiver<T> {
unsafe impl<T: Send> Send for Receiver<T> {} unsafe impl<T: Send> Send for Receiver<T> {}
impl<T> ! Sync for Receiver<T> {} impl<T> !Sync for Receiver<T> {}
#[derive(Debug)] #[derive(Debug)]
pub struct RecvError; pub struct RecvError;
@ -54,7 +54,7 @@ pub struct Sender<T> {
unsafe impl<T: Send> Send for Sender<T> {} unsafe impl<T: Send> Send for Sender<T> {}
impl<T> ! Sync for Sender<T> {} impl<T> !Sync for Sender<T> {}
#[derive(Debug)] #[derive(Debug)]
pub struct SendError<T>(pub T); pub struct SendError<T>(pub T);
@ -78,7 +78,9 @@ impl<T> Sender<T> {
/// Creates a new asynchronous channel, returning the sender/receiver halves. /// Creates a new asynchronous channel, returning the sender/receiver halves.
pub fn channel<T>() -> (Sender<T>, Receiver<T>) { pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
let channel = Arc::new(Channel::<T>::default()); let channel = Arc::new(Channel::<T>::default());
let sender = Sender { inner: Arc::downgrade(&channel) }; let sender = Sender {
inner: Arc::downgrade(&channel),
};
let receiver = Receiver { inner: channel }; let receiver = Receiver { inner: channel };
(sender, receiver) (sender, receiver)
} }
@ -86,9 +88,9 @@ pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
pub mod test { pub mod test {
//! Copied from std::mpsc::test //! Copied from std::mpsc::test
use alloc::boxed::Box;
use super::*; use super::*;
use crate::thread; use crate::thread;
use alloc::boxed::Box;
fn smoke() { fn smoke() {
let (tx, rx) = channel::<i32>(); let (tx, rx) = channel::<i32>();
@ -26,19 +26,18 @@
//! `MutexSupport` provides several interfaces that are called at different points during lock operations. //! `MutexSupport` provides several interfaces that are called at different points during lock operations.
//! Note that this interface is in fact the union of what several implementations need, so it is not very general. //! Note that this interface is in fact the union of what several implementations need, so it is not very general.
use super::Condvar;
use crate::arch::interrupt; use crate::arch::interrupt;
use core::cell::UnsafeCell; use core::cell::UnsafeCell;
use core::fmt; use core::fmt;
use core::ops::{Deref, DerefMut}; use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicBool, Ordering}; use core::sync::atomic::{AtomicBool, Ordering};
use super::Condvar;
pub type SpinLock<T> = Mutex<T, Spin>; pub type SpinLock<T> = Mutex<T, Spin>;
pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>; pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
pub type ThreadLock<T> = Mutex<T, Condvar>; pub type ThreadLock<T> = Mutex<T, Condvar>;
pub struct Mutex<T: ?Sized, S: MutexSupport> pub struct Mutex<T: ?Sized, S: MutexSupport> {
{
lock: AtomicBool, lock: AtomicBool,
support: S, support: S,
data: UnsafeCell<T>, data: UnsafeCell<T>,
@ -47,8 +46,7 @@ pub struct Mutex<T: ?Sized, S: MutexSupport>
/// A guard to which the protected data can be accessed /// A guard to which the protected data can be accessed
/// ///
/// When the guard falls out of scope it will release the lock. /// When the guard falls out of scope it will release the lock.
pub struct MutexGuard<'a, T: ?Sized + 'a, S: MutexSupport + 'a> pub struct MutexGuard<'a, T: ?Sized + 'a, S: MutexSupport + 'a> {
{
pub(super) mutex: &'a Mutex<T, S>, pub(super) mutex: &'a Mutex<T, S>,
support_guard: S::GuardData, support_guard: S::GuardData,
} }
@ -58,8 +56,7 @@ unsafe impl<T: ?Sized + Send, S: MutexSupport> Sync for Mutex<T, S> {}
unsafe impl<T: ?Sized + Send, S: MutexSupport> Send for Mutex<T, S> {} unsafe impl<T: ?Sized + Send, S: MutexSupport> Send for Mutex<T, S> {}
impl<T, S: MutexSupport> Mutex<T, S> impl<T, S: MutexSupport> Mutex<T, S> {
{
/// Creates a new spinlock wrapping the supplied data. /// Creates a new spinlock wrapping the supplied data.
/// ///
/// May be used statically: /// May be used statically:
@ -93,8 +90,7 @@ impl<T, S: MutexSupport> Mutex<T, S>
} }
} }
impl<T: ?Sized, S: MutexSupport> Mutex<T, S> impl<T: ?Sized, S: MutexSupport> Mutex<T, S> {
{
fn obtain_lock(&self) { fn obtain_lock(&self) {
while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false { while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
// Wait until the lock looks unlocked before retrying // Wait until the lock looks unlocked before retrying
@ -119,8 +115,7 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
/// } /// }
/// ///
/// ``` /// ```
pub fn lock(&self) -> MutexGuard<T, S> pub fn lock(&self) -> MutexGuard<T, S> {
{
let support_guard = S::before_lock(); let support_guard = S::before_lock();
self.obtain_lock(); self.obtain_lock();
MutexGuard { MutexGuard {
@ -155,11 +150,14 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
} }
} }
impl<T: ?Sized + fmt::Debug, S: MutexSupport + fmt::Debug> fmt::Debug for Mutex<T, S> impl<T: ?Sized + fmt::Debug, S: MutexSupport + fmt::Debug> fmt::Debug for Mutex<T, S> {
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() { match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: {:?}, support: {:?} }}", &*guard, self.support), Some(guard) => write!(
f,
"Mutex {{ data: {:?}, support: {:?} }}",
&*guard, self.support
),
None => write!(f, "Mutex {{ <locked>, support: {:?} }}", self.support), None => write!(f, "Mutex {{ <locked>, support: {:?} }}", self.support),
} }
} }
@ -171,19 +169,20 @@ impl<T: ?Sized + Default, S: MutexSupport> Default for Mutex<T, S> {
} }
} }
impl<'a, T: ?Sized, S: MutexSupport> Deref for MutexGuard<'a, T, S> impl<'a, T: ?Sized, S: MutexSupport> Deref for MutexGuard<'a, T, S> {
{
type Target = T; type Target = T;
fn deref(&self) -> &T { unsafe { &*self.mutex.data.get() } } fn deref(&self) -> &T {
unsafe { &*self.mutex.data.get() }
}
} }
impl<'a, T: ?Sized, S: MutexSupport> DerefMut for MutexGuard<'a, T, S> impl<'a, T: ?Sized, S: MutexSupport> DerefMut for MutexGuard<'a, T, S> {
{ fn deref_mut(&mut self) -> &mut T {
fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.data.get() } } unsafe { &mut *self.mutex.data.get() }
}
} }
impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S> impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S> {
{
/// The dropping of the MutexGuard will release the lock it was created from. /// The dropping of the MutexGuard will release the lock it was created from.
fn drop(&mut self) { fn drop(&mut self) {
self.mutex.lock.store(false, Ordering::Release); self.mutex.lock.store(false, Ordering::Release);
@ -210,7 +209,9 @@ pub struct Spin;
impl MutexSupport for Spin { impl MutexSupport for Spin {
type GuardData = (); type GuardData = ();
fn new() -> Self { Spin } fn new() -> Self {
Spin
}
fn cpu_relax(&self) { fn cpu_relax(&self) {
unsafe { unsafe {
#[cfg(target_arch = "x86_64")] #[cfg(target_arch = "x86_64")]
@ -2,12 +2,12 @@
//! //!
//! The code is borrowed from [RustDoc - Dining Philosophers](https://doc.rust-lang.org/1.6.0/book/dining-philosophers.html) //! The code is borrowed from [RustDoc - Dining Philosophers](https://doc.rust-lang.org/1.6.0/book/dining-philosophers.html)
use alloc::{sync::Arc, vec::Vec};
use core::time::Duration;
use crate::sync::Condvar; use crate::sync::Condvar;
use crate::sync::ThreadLock as Mutex; use crate::sync::ThreadLock as Mutex;
use crate::thread; use crate::thread;
use alloc::vec; use alloc::vec;
use alloc::{sync::Arc, vec::Vec};
use core::time::Duration;
use log::*; use log::*;
struct Philosopher { struct Philosopher {
@ -18,11 +18,7 @@ struct Philosopher {
impl Philosopher { impl Philosopher {
fn new(name: &'static str, left: usize, right: usize) -> Philosopher { fn new(name: &'static str, left: usize, right: usize) -> Philosopher {
Philosopher { Philosopher { name, left, right }
name,
left,
right,
}
} }
fn eat(&self, table: &Arc<Table>) { fn eat(&self, table: &Arc<Table>) {
@ -92,7 +88,9 @@ fn philosopher(table: Arc<Table>) {
Philosopher::new("5", 0, 4), Philosopher::new("5", 0, 4),
]; ];
let handles: Vec<_> = philosophers.into_iter().map(|p| { let handles: Vec<_> = philosophers
.into_iter()
.map(|p| {
let table = table.clone(); let table = table.clone();
trace!("philosopher start"); trace!("philosopher start");
@ -103,7 +101,8 @@ fn philosopher(table: Arc<Table>) {
println!("{} iter {} end.", p.name, i); println!("{} iter {} end.", p.name, i);
} }
}) })
}).collect(); })
.collect();
trace!("philosopher starting finish"); trace!("philosopher starting finish");
for h in handles { for h in handles {
@ -116,7 +115,13 @@ pub fn philosopher_using_mutex() {
println!("philosophers using mutex"); println!("philosophers using mutex");
let table = Arc::new(MutexTable { let table = Arc::new(MutexTable {
forks: vec![Mutex::new(()), Mutex::new(()), Mutex::new(()), Mutex::new(()), Mutex::new(())] forks: vec![
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
Mutex::new(()),
],
}); });
philosopher(table); philosopher(table);
} }
@ -126,7 +131,13 @@ pub fn philosopher_using_monitor() {
let table = Arc::new(MonitorTable { let table = Arc::new(MonitorTable {
fork_status: Mutex::new(vec![false; 5]), fork_status: Mutex::new(vec![false; 5]),
fork_condvar: vec![Condvar::new(), Condvar::new(), Condvar::new(), Condvar::new(), Condvar::new()], fork_condvar: vec![
Condvar::new(),
Condvar::new(),
Condvar::new(),
Condvar::new(),
Condvar::new(),
],
}); });
philosopher(table); philosopher(table);
} }

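For reference, the `.into_iter().map(...).collect()` plus join loop that gets reflowed above is the usual spawn-then-join shape. A small self-contained sketch with `std::thread` and `std::sync::Arc` (the kernel uses its own `thread` module and `ThreadLock`, so the types here are stand-ins):

    use std::sync::Arc;
    use std::thread;

    fn main() {
        // Shared, immutable table of "forks"; each thread gets its own Arc clone.
        let table = Arc::new(vec!["fork0", "fork1", "fork2", "fork3", "fork4"]);
        let names = ["1", "2", "3", "4", "5"];

        let handles: Vec<_> = names
            .iter()
            .map(|&name| {
                let table = table.clone();
                thread::spawn(move || {
                    println!("philosopher {} sees {} forks", name, table.len());
                })
            })
            .collect();

        for h in handles {
            h.join().unwrap();
        }
    }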
@ -1,7 +1,7 @@
//! Custom nonstandard syscalls //! Custom nonstandard syscalls
use super::*;
use rcore_memory::memory_set::handler::Linear; use rcore_memory::memory_set::handler::Linear;
use rcore_memory::memory_set::MemoryAttr; use rcore_memory::memory_set::MemoryAttr;
use super::*;
/// Allocate this PCI device to user space /// Allocate this PCI device to user space
/// The kernel driver using the PCI device will be unloaded /// The kernel driver using the PCI device will be unloaded
@ -13,15 +13,13 @@ pub fn sys_map_pci_device(vendor: usize, product: usize) -> SysResult {
vendor, product vendor, product
); );
let tag = pci::find_device(vendor as u32, product as u32) let tag = pci::find_device(vendor as u32, product as u32).ok_or(SysError::ENOENT)?;
.ok_or(SysError::ENOENT)?;
if pci::detach_driver(&tag) { if pci::detach_driver(&tag) {
info!("Kernel driver detached"); info!("Kernel driver detached");
} }
// Get BAR0 memory // Get BAR0 memory
let (base, len) = unsafe { tag.get_bar_mem(0) } let (base, len) = unsafe { tag.get_bar_mem(0) }.ok_or(SysError::ENOENT)?;
.ok_or(SysError::ENOENT)?;
let mut proc = process(); let mut proc = process();
let virt_addr = proc.vm.find_free_area(0, len); let virt_addr = proc.vm.find_free_area(0, len);

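The two changes above just join the `.ok_or(SysError::ENOENT)?` calls onto one line. For readers unfamiliar with the idiom, a tiny sketch of how `ok_or` plus `?` turn an `Option` lookup into an early-returning `Result` (the error type and vendor id below are stand-ins, not the kernel's):

    #[derive(Debug, PartialEq)]
    enum Error {
        NotFound, // stand-in for SysError::ENOENT
    }

    fn find_device(vendor: u32) -> Option<&'static str> {
        if vendor == 0x8086 { Some("e1000") } else { None }
    }

    fn map_device(vendor: u32) -> Result<&'static str, Error> {
        // None becomes Err(Error::NotFound), and `?` returns it immediately.
        let name = find_device(vendor).ok_or(Error::NotFound)?;
        Ok(name)
    }

    fn main() {
        assert_eq!(map_device(0x8086), Ok("e1000"));
        assert_eq!(map_device(0x1234), Err(Error::NotFound));
    }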
@ -1,14 +1,14 @@
//! Syscalls for file system //! Syscalls for file system
use core::mem::size_of;
use core::cmp::min;
use core::cell::UnsafeCell; use core::cell::UnsafeCell;
use core::cmp::min;
use core::mem::size_of;
use rcore_fs::vfs::Timespec; use rcore_fs::vfs::Timespec;
use crate::drivers::SOCKET_ACTIVITY;
use crate::fs::*; use crate::fs::*;
use crate::memory::MemorySet; use crate::memory::MemorySet;
use crate::sync::Condvar; use crate::sync::Condvar;
use crate::drivers::SOCKET_ACTIVITY;
use super::*; use super::*;
@ -22,7 +22,7 @@ pub fn sys_read(fd: usize, base: *mut u8, len: usize) -> SysResult {
match proc.files.get(&fd) { match proc.files.get(&fd) {
Some(FileLike::File(_)) => sys_read_file(&mut proc, fd, base, len), Some(FileLike::File(_)) => sys_read_file(&mut proc, fd, base, len),
Some(FileLike::Socket(_)) => sys_read_socket(&mut proc, fd, base, len), Some(FileLike::Socket(_)) => sys_read_socket(&mut proc, fd, base, len),
None => Err(SysError::EINVAL) None => Err(SysError::EINVAL),
} }
} }
@ -37,12 +37,15 @@ pub fn sys_write(fd: usize, base: *const u8, len: usize) -> SysResult {
match proc.files.get(&fd) { match proc.files.get(&fd) {
Some(FileLike::File(_)) => sys_write_file(&mut proc, fd, base, len), Some(FileLike::File(_)) => sys_write_file(&mut proc, fd, base, len),
Some(FileLike::Socket(_)) => sys_write_socket(&mut proc, fd, base, len), Some(FileLike::Socket(_)) => sys_write_socket(&mut proc, fd, base, len),
None => Err(SysError::EINVAL) None => Err(SysError::EINVAL),
} }
} }
pub fn sys_pread(fd: usize, base: *mut u8, len: usize, offset: usize) -> SysResult { pub fn sys_pread(fd: usize, base: *mut u8, len: usize, offset: usize) -> SysResult {
info!("pread: fd: {}, base: {:?}, len: {}, offset: {}", fd, base, len, offset); info!(
"pread: fd: {}, base: {:?}, len: {}, offset: {}",
fd, base, len, offset
);
let mut proc = process(); let mut proc = process();
proc.vm.check_write_array(base, len)?; proc.vm.check_write_array(base, len)?;
@ -52,7 +55,10 @@ pub fn sys_pread(fd: usize, base: *mut u8, len: usize, offset: usize) -> SysResu
} }
pub fn sys_pwrite(fd: usize, base: *const u8, len: usize, offset: usize) -> SysResult { pub fn sys_pwrite(fd: usize, base: *const u8, len: usize, offset: usize) -> SysResult {
info!("pwrite: fd: {}, base: {:?}, len: {}, offset: {}", fd, base, len, offset); info!(
"pwrite: fd: {}, base: {:?}, len: {}, offset: {}",
fd, base, len, offset
);
let mut proc = process(); let mut proc = process();
proc.vm.check_read_array(base, len)?; proc.vm.check_read_array(base, len)?;
@ -74,7 +80,10 @@ pub fn sys_write_file(proc: &mut Process, fd: usize, base: *const u8, len: usize
} }
pub fn sys_poll(ufds: *mut PollFd, nfds: usize, timeout_msecs: usize) -> SysResult { pub fn sys_poll(ufds: *mut PollFd, nfds: usize, timeout_msecs: usize) -> SysResult {
info!("poll: ufds: {:?}, nfds: {}, timeout_msecs: {:#x}", ufds, nfds, timeout_msecs); info!(
"poll: ufds: {:?}, nfds: {}, timeout_msecs: {:#x}",
ufds, nfds, timeout_msecs
);
let proc = process(); let proc = process();
proc.vm.check_write_array(ufds, nfds)?; proc.vm.check_write_array(ufds, nfds)?;
@ -100,7 +109,7 @@ pub fn sys_poll(ufds: *mut PollFd, nfds: usize, timeout_msecs: usize) -> SysResu
poll.revents = poll.revents | PE::IN; poll.revents = poll.revents | PE::IN;
events = events + 1; events = events + 1;
} }
}, }
Some(FileLike::Socket(wrapper)) => { Some(FileLike::Socket(wrapper)) => {
let (input, output, err) = poll_socket(&wrapper); let (input, output, err) = poll_socket(&wrapper);
if err { if err {
@ -143,7 +152,7 @@ const MAX_FDSET_SIZE: usize = 1024 / FD_PER_ITEM;
struct FdSet { struct FdSet {
addr: *mut u32, addr: *mut u32,
nfds: usize, nfds: usize,
saved: [u32; MAX_FDSET_SIZE] saved: [u32; MAX_FDSET_SIZE],
} }
impl FdSet { impl FdSet {
@ -157,7 +166,7 @@ impl FdSet {
if len > MAX_FDSET_SIZE { if len > MAX_FDSET_SIZE {
return Err(SysError::EINVAL); return Err(SysError::EINVAL);
} }
let slice = unsafe {slice::from_raw_parts_mut(addr, len)}; let slice = unsafe { slice::from_raw_parts_mut(addr, len) };
// save the fdset, and clear it // save the fdset, and clear it
for i in 0..len { for i in 0..len {
@ -166,11 +175,7 @@ impl FdSet {
} }
} }
Ok(FdSet { Ok(FdSet { addr, nfds, saved })
addr,
nfds,
saved
})
} }
/// Try to set fd in `FdSet` /// Try to set fd in `FdSet`
@ -196,8 +201,17 @@ impl FdSet {
} }
} }
pub fn sys_select(nfds: usize, read: *mut u32, write: *mut u32, err: *mut u32, timeout: *const TimeVal) -> SysResult { pub fn sys_select(
info!("select: nfds: {}, read: {:?}, write: {:?}, err: {:?}, timeout: {:?}", nfds, read, write, err, timeout); nfds: usize,
read: *mut u32,
write: *mut u32,
err: *mut u32,
timeout: *const TimeVal,
) -> SysResult {
info!(
"select: nfds: {}, read: {:?}, write: {:?}, err: {:?}, timeout: {:?}",
nfds, read, write, err, timeout
);
let proc = process(); let proc = process();
let mut read_fds = FdSet::new(&proc.vm, read, nfds)?; let mut read_fds = FdSet::new(&proc.vm, read, nfds)?;
@ -222,23 +236,23 @@ pub fn sys_select(nfds: usize, read: *mut u32, write: *mut u32, err: *mut u32, t
FileLike::File(_) => { FileLike::File(_) => {
// FIXME: assume it is stdin for now // FIXME: assume it is stdin for now
if STDIN.can_read() { if STDIN.can_read() {
if read_fds.is_set(*fd){ if read_fds.is_set(*fd) {
read_fds.set(*fd); read_fds.set(*fd);
events = events + 1; events = events + 1;
} }
} }
}, }
FileLike::Socket(wrapper) => { FileLike::Socket(wrapper) => {
let (input, output, err) = poll_socket(&wrapper); let (input, output, err) = poll_socket(&wrapper);
if err && err_fds.is_set(*fd){ if err && err_fds.is_set(*fd) {
err_fds.set(*fd); err_fds.set(*fd);
events = events + 1; events = events + 1;
} }
if input && read_fds.is_set(*fd){ if input && read_fds.is_set(*fd) {
read_fds.set(*fd); read_fds.set(*fd);
events = events + 1; events = events + 1;
} }
if output && write_fds.is_set(*fd){ if output && write_fds.is_set(*fd) {
write_fds.set(*fd); write_fds.set(*fd);
events = events + 1; events = events + 1;
} }
@ -268,7 +282,10 @@ pub fn sys_select(nfds: usize, read: *mut u32, write: *mut u32, err: *mut u32, t
} }
pub fn sys_readv(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResult { pub fn sys_readv(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResult {
info!("readv: fd: {}, iov: {:?}, count: {}", fd, iov_ptr, iov_count); info!(
"readv: fd: {}, iov: {:?}, count: {}",
fd, iov_ptr, iov_count
);
let mut proc = process(); let mut proc = process();
let mut iovs = IoVecs::check_and_new(iov_ptr, iov_count, &proc.vm, true)?; let mut iovs = IoVecs::check_and_new(iov_ptr, iov_count, &proc.vm, true)?;
@ -282,7 +299,10 @@ pub fn sys_readv(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResul
} }
pub fn sys_writev(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResult { pub fn sys_writev(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResult {
info!("writev: fd: {}, iov: {:?}, count: {}", fd, iov_ptr, iov_count); info!(
"writev: fd: {}, iov: {:?}, count: {}",
fd, iov_ptr, iov_count
);
let mut proc = process(); let mut proc = process();
let iovs = IoVecs::check_and_new(iov_ptr, iov_count, &proc.vm, false)?; let iovs = IoVecs::check_and_new(iov_ptr, iov_count, &proc.vm, false)?;
@ -292,7 +312,7 @@ pub fn sys_writev(fd: usize, iov_ptr: *const IoVec, iov_count: usize) -> SysResu
match proc.files.get(&fd) { match proc.files.get(&fd) {
Some(FileLike::File(_)) => sys_write_file(&mut proc, fd, buf.as_ptr(), len), Some(FileLike::File(_)) => sys_write_file(&mut proc, fd, buf.as_ptr(), len),
Some(FileLike::Socket(_)) => sys_write_socket(&mut proc, fd, buf.as_ptr(), len), Some(FileLike::Socket(_)) => sys_write_socket(&mut proc, fd, buf.as_ptr(), len),
None => Err(SysError::EINVAL) None => Err(SysError::EINVAL),
} }
} }
@ -306,10 +326,12 @@ pub fn sys_openat(dir_fd: usize, path: *const u8, flags: usize, mode: usize) ->
let mut proc = process(); let mut proc = process();
let path = unsafe { proc.vm.check_and_clone_cstr(path)? }; let path = unsafe { proc.vm.check_and_clone_cstr(path)? };
let flags = OpenFlags::from_bits_truncate(flags); let flags = OpenFlags::from_bits_truncate(flags);
info!("openat: dir_fd: {}, path: {:?}, flags: {:?}, mode: {:#o}", dir_fd as isize, path, flags, mode); info!(
"openat: dir_fd: {}, path: {:?}, flags: {:?}, mode: {:#o}",
dir_fd as isize, path, flags, mode
);
let inode = let inode = if dir_fd == AT_FDCWD {
if dir_fd == AT_FDCWD {
// from process cwd // from process cwd
if flags.contains(OpenFlags::CREATE) { if flags.contains(OpenFlags::CREATE) {
let (dir_path, file_name) = split_path(&path); let (dir_path, file_name) = split_path(&path);
@ -321,7 +343,7 @@ pub fn sys_openat(dir_fd: usize, path: *const u8, flags: usize, mode: usize) ->
return Err(SysError::EEXIST); return Err(SysError::EEXIST);
} }
file_inode file_inode
}, }
Err(FsError::EntryNotFound) => { Err(FsError::EntryNotFound) => {
dir_inode.create(file_name, FileType::File, mode as u32)? dir_inode.create(file_name, FileType::File, mode as u32)?
} }
@ -343,7 +365,7 @@ pub fn sys_openat(dir_fd: usize, path: *const u8, flags: usize, mode: usize) ->
return Err(SysError::EEXIST); return Err(SysError::EEXIST);
} }
file_inode file_inode
}, }
Err(FsError::EntryNotFound) => { Err(FsError::EntryNotFound) => {
dir_inode.create(file_name, FileType::File, mode as u32)? dir_inode.create(file_name, FileType::File, mode as u32)?
} }
@ -390,9 +412,7 @@ pub fn sys_getcwd(buf: *mut u8, len: usize) -> SysResult {
if proc.cwd.len() + 1 > len { if proc.cwd.len() + 1 > len {
return Err(SysError::ERANGE); return Err(SysError::ERANGE);
} }
unsafe { unsafe { util::write_cstr(buf, &proc.cwd) }
util::write_cstr(buf, &proc.cwd)
}
Ok(buf as usize) Ok(buf as usize)
} }
@ -408,7 +428,9 @@ pub fn sys_fstat(fd: usize, stat_ptr: *mut Stat) -> SysResult {
let file = proc.get_file(fd)?; let file = proc.get_file(fd)?;
let stat = Stat::from(file.metadata()?); let stat = Stat::from(file.metadata()?);
// TODO: handle symlink // TODO: handle symlink
unsafe { stat_ptr.write(stat); } unsafe {
stat_ptr.write(stat);
}
Ok(0) Ok(0)
} }
@ -420,7 +442,9 @@ pub fn sys_lstat(path: *const u8, stat_ptr: *mut Stat) -> SysResult {
let inode = proc.lookup_inode(&path)?; let inode = proc.lookup_inode(&path)?;
let stat = Stat::from(inode.metadata()?); let stat = Stat::from(inode.metadata()?);
unsafe { stat_ptr.write(stat); } unsafe {
stat_ptr.write(stat);
}
Ok(0) Ok(0)
} }
@ -483,7 +507,10 @@ pub fn sys_ftruncate(fd: usize, len: usize) -> SysResult {
} }
pub fn sys_getdents64(fd: usize, buf: *mut LinuxDirent64, buf_size: usize) -> SysResult { pub fn sys_getdents64(fd: usize, buf: *mut LinuxDirent64, buf_size: usize) -> SysResult {
info!("getdents64: fd: {}, ptr: {:?}, buf_size: {}", fd, buf, buf_size); info!(
"getdents64: fd: {}, ptr: {:?}, buf_size: {}",
fd, buf, buf_size
);
let mut proc = process(); let mut proc = process();
proc.vm.check_write_array(buf as *mut u8, buf_size)?; proc.vm.check_write_array(buf as *mut u8, buf_size)?;
let file = proc.get_file(fd)?; let file = proc.get_file(fd)?;
@ -499,7 +526,9 @@ pub fn sys_getdents64(fd: usize, buf: *mut LinuxDirent64, buf_size: usize) -> Sy
}?; }?;
// TODO: get ino from dirent // TODO: get ino from dirent
let ok = writer.try_write(0, DirentType::from_type(&info.type_).bits(), &name); let ok = writer.try_write(0, DirentType::from_type(&info.type_).bits(), &name);
if !ok { break; } if !ok {
break;
}
} }
Ok(writer.written_size) Ok(writer.written_size)
} }
@ -515,12 +544,12 @@ pub fn sys_dup2(fd1: usize, fd2: usize) -> SysResult {
let new_file = FileLike::File(file.clone()); let new_file = FileLike::File(file.clone());
proc.files.insert(fd2, new_file); proc.files.insert(fd2, new_file);
Ok(fd2) Ok(fd2)
}, }
Some(FileLike::Socket(wrapper)) => { Some(FileLike::Socket(wrapper)) => {
let new_wrapper = wrapper.clone(); let new_wrapper = wrapper.clone();
sys_dup2_socket(&mut proc, new_wrapper, fd2) sys_dup2_socket(&mut proc, new_wrapper, fd2)
}, }
None => Err(SysError::EINVAL) None => Err(SysError::EINVAL),
} }
} }
@ -552,23 +581,33 @@ pub fn sys_rename(oldpath: *const u8, newpath: *const u8) -> SysResult {
sys_renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath) sys_renameat(AT_FDCWD, oldpath, AT_FDCWD, newpath)
} }
pub fn sys_renameat(olddirfd: usize, oldpath: *const u8, newdirfd: usize, newpath: *const u8) -> SysResult { pub fn sys_renameat(
olddirfd: usize,
oldpath: *const u8,
newdirfd: usize,
newpath: *const u8,
) -> SysResult {
let mut proc = process(); let mut proc = process();
let oldpath = unsafe { proc.vm.check_and_clone_cstr(oldpath)? }; let oldpath = unsafe { proc.vm.check_and_clone_cstr(oldpath)? };
let newpath = unsafe { proc.vm.check_and_clone_cstr(newpath)? }; let newpath = unsafe { proc.vm.check_and_clone_cstr(newpath)? };
info!("renameat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}", olddirfd, oldpath, newdirfd, newpath); info!(
"renameat: olddirfd: {}, oldpath: {:?}, newdirfd: {}, newpath: {:?}",
olddirfd, oldpath, newdirfd, newpath
);
let (old_dir_path, old_file_name) = split_path(&oldpath); let (old_dir_path, old_file_name) = split_path(&oldpath);
let (new_dir_path, new_file_name) = split_path(&newpath); let (new_dir_path, new_file_name) = split_path(&newpath);
let old_dir_inode = if olddirfd == AT_FDCWD { let old_dir_inode = if olddirfd == AT_FDCWD {
proc.lookup_inode(old_dir_path)? proc.lookup_inode(old_dir_path)?
} else { } else {
proc.get_file(olddirfd)?.lookup_follow(old_dir_path, FOLLOW_MAX_DEPTH)? proc.get_file(olddirfd)?
.lookup_follow(old_dir_path, FOLLOW_MAX_DEPTH)?
}; };
let new_dir_inode = if newdirfd == AT_FDCWD { let new_dir_inode = if newdirfd == AT_FDCWD {
proc.lookup_inode(new_dir_path)? proc.lookup_inode(new_dir_path)?
} else { } else {
proc.get_file(newdirfd)?.lookup_follow(new_dir_path, FOLLOW_MAX_DEPTH)? proc.get_file(newdirfd)?
.lookup_follow(new_dir_path, FOLLOW_MAX_DEPTH)?
}; };
old_dir_inode.move_(old_file_name, &new_dir_inode, new_file_name)?; old_dir_inode.move_(old_file_name, &new_dir_inode, new_file_name)?;
Ok(0) Ok(0)
@ -640,10 +679,30 @@ pub fn sys_pipe(fds: *mut u32) -> SysResult {
let (read, write) = Pipe::create_pair(); let (read, write) = Pipe::create_pair();
let read_fd = proc.get_free_fd(); let read_fd = proc.get_free_fd();
proc.files.insert(read_fd, FileLike::File(FileHandle::new(Arc::new(read), OpenOptions { read: true, write: false, append: false }))); proc.files.insert(
read_fd,
FileLike::File(FileHandle::new(
Arc::new(read),
OpenOptions {
read: true,
write: false,
append: false,
},
)),
);
let write_fd = proc.get_free_fd(); let write_fd = proc.get_free_fd();
proc.files.insert(write_fd, FileLike::File(FileHandle::new(Arc::new(write), OpenOptions { read: false, write: true, append: false }))); proc.files.insert(
write_fd,
FileLike::File(FileHandle::new(
Arc::new(write),
OpenOptions {
read: false,
write: true,
append: false,
},
)),
);
unsafe { unsafe {
*fds = read_fd as u32; *fds = read_fd as u32;
@ -661,12 +720,15 @@ pub fn sys_sync() -> SysResult {
} }
pub fn sys_sendfile(out_fd: usize, in_fd: usize, offset: *mut usize, count: usize) -> SysResult { pub fn sys_sendfile(out_fd: usize, in_fd: usize, offset: *mut usize, count: usize) -> SysResult {
info!("sendfile: out: {}, in: {}, offset: {:?}, count: {}", out_fd, in_fd, offset, count); info!(
"sendfile: out: {}, in: {}, offset: {:?}, count: {}",
out_fd, in_fd, offset, count
);
let proc = process(); let proc = process();
// We know it's safe, pacify the borrow checker // We know it's safe, pacify the borrow checker

let proc_cell = UnsafeCell::new(proc); let proc_cell = UnsafeCell::new(proc);
let proc_in = unsafe {&mut *proc_cell.get()}; let proc_in = unsafe { &mut *proc_cell.get() };
let proc_out = unsafe {&mut *proc_cell.get()}; let proc_out = unsafe { &mut *proc_cell.get() };
//let in_file: &mut FileHandle = unsafe { &mut *UnsafeCell::new(proc.get_file(in_fd)?).get() }; //let in_file: &mut FileHandle = unsafe { &mut *UnsafeCell::new(proc.get_file(in_fd)?).get() };
//let out_file: &mut FileHandle = unsafe { &mut *UnsafeCell::new(proc.get_file(out_fd)?).get() }; //let out_file: &mut FileHandle = unsafe { &mut *UnsafeCell::new(proc.get_file(out_fd)?).get() };
let in_file = proc_in.get_file(in_fd)?; let in_file = proc_in.get_file(in_fd)?;
@ -693,11 +755,9 @@ pub fn sys_sendfile(out_fd: usize, in_fd: usize, offset: *mut usize, count: usiz
} }
return Ok(bytes_read); return Ok(bytes_read);
} else { } else {
let proc_mem = unsafe {&mut *proc_cell.get()}; let proc_mem = unsafe { &mut *proc_cell.get() };
proc_mem.vm.check_read_ptr(offset)?; proc_mem.vm.check_read_ptr(offset)?;
let mut read_offset = unsafe { let mut read_offset = unsafe { *offset };
*offset
};
// read from specified offset and write new offset back // read from specified offset and write new offset back
let mut bytes_read = 0; let mut bytes_read = 0;
while bytes_read < count { while bytes_read < count {
@ -726,16 +786,19 @@ pub fn sys_sendfile(out_fd: usize, in_fd: usize, offset: *mut usize, count: usiz
impl Process { impl Process {
pub fn get_file(&mut self, fd: usize) -> Result<&mut FileHandle, SysError> { pub fn get_file(&mut self, fd: usize) -> Result<&mut FileHandle, SysError> {
self.files.get_mut(&fd).ok_or(SysError::EBADF).and_then(|f| { self.files
match f { .get_mut(&fd)
.ok_or(SysError::EBADF)
.and_then(|f| match f {
FileLike::File(file) => Ok(file), FileLike::File(file) => Ok(file),
_ => Err(SysError::EBADF) _ => Err(SysError::EBADF),
}
}) })
} }
pub fn lookup_inode(&self, path: &str) -> Result<Arc<INode>, SysError> { pub fn lookup_inode(&self, path: &str) -> Result<Arc<INode>, SysError> {
debug!("lookup_inode: cwd {} path {}", self.cwd, path); debug!("lookup_inode: cwd {} path {}", self.cwd, path);
Ok(ROOT_INODE.lookup(&self.cwd)?.lookup_follow(path, FOLLOW_MAX_DEPTH)?) Ok(ROOT_INODE
.lookup(&self.cwd)?
.lookup_follow(path, FOLLOW_MAX_DEPTH)?)
} }
} }
@ -1058,7 +1121,7 @@ impl From<Metadata> for Stat {
atime: info.atime, atime: info.atime,
mtime: info.mtime, mtime: info.mtime,
ctime: info.ctime, ctime: info.ctime,
_pad0: 0 _pad0: 0,
} }
} }
@ -1079,7 +1142,7 @@ impl From<Metadata> for Stat {
mtime: info.mtime, mtime: info.mtime,
ctime: info.ctime, ctime: info.ctime,
__pad: 0, __pad: 0,
__pad2: 0 __pad2: 0,
} }
} }
} }
@ -1102,7 +1165,12 @@ pub struct IoVec {
struct IoVecs(Vec<&'static mut [u8]>); struct IoVecs(Vec<&'static mut [u8]>);
impl IoVecs { impl IoVecs {
fn check_and_new(iov_ptr: *const IoVec, iov_count: usize, vm: &MemorySet, readv: bool) -> Result<Self, SysError> { fn check_and_new(
iov_ptr: *const IoVec,
iov_count: usize,
vm: &MemorySet,
readv: bool,
) -> Result<Self, SysError> {
vm.check_read_array(iov_ptr, iov_count)?; vm.check_read_array(iov_ptr, iov_count)?;
let iovs = unsafe { slice::from_raw_parts(iov_ptr, iov_count) }.to_vec(); let iovs = unsafe { slice::from_raw_parts(iov_ptr, iov_count) }.to_vec();
// check all bufs in iov // check all bufs in iov
@ -1116,7 +1184,10 @@ impl IoVecs {
} }
} }
} }
let slices = iovs.iter().map(|iov| unsafe { slice::from_raw_parts_mut(iov.base, iov.len as usize) }).collect(); let slices = iovs
.iter()
.map(|iov| unsafe { slice::from_raw_parts_mut(iov.base, iov.len as usize) })
.collect();
Ok(IoVecs(slices)) Ok(IoVecs(slices))
} }
@ -1148,7 +1219,9 @@ impl IoVecs {
let total_len = self.0.iter().map(|slice| slice.len()).sum::<usize>(); let total_len = self.0.iter().map(|slice| slice.len()).sum::<usize>();
let mut buf = Vec::with_capacity(total_len); let mut buf = Vec::with_capacity(total_len);
if set_len { if set_len {
unsafe { buf.set_len(total_len); } unsafe {
buf.set_len(total_len);
}
} }
buf buf
} }

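Most of this file is pure reformatting, but the `FdSet` used by `sys_select` is worth a note: it treats user memory as an array of `u32` words, one bit per fd. A safe standalone sketch of the same bit arithmetic (a `Vec<u32>` here instead of the raw user pointer, and only the `set`/`is_set` half of the real type):

    struct FdSet {
        words: Vec<u32>,
    }

    impl FdSet {
        fn new(nfds: usize) -> Self {
            // 32 fds per u32 word, rounded up.
            FdSet { words: vec![0u32; (nfds + 31) / 32] }
        }
        fn set(&mut self, fd: usize) {
            self.words[fd / 32] |= 1 << (fd % 32);
        }
        fn is_set(&self, fd: usize) -> bool {
            self.words[fd / 32] & (1 << (fd % 32)) != 0
        }
    }

    fn main() {
        let mut read_fds = FdSet::new(64);
        read_fds.set(3);
        read_fds.set(33); // lands in the second u32 word
        assert!(read_fds.is_set(3) && read_fds.is_set(33));
        assert!(!read_fds.is_set(4));
    }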
@ -1,4 +1,4 @@
use rcore_memory::memory_set::handler::{Delay, ByFrame}; use rcore_memory::memory_set::handler::{ByFrame, Delay};
use rcore_memory::memory_set::MemoryAttr; use rcore_memory::memory_set::MemoryAttr;
use rcore_memory::paging::PageTable; use rcore_memory::paging::PageTable;
use rcore_memory::Page; use rcore_memory::Page;
@ -85,7 +85,10 @@ pub fn sys_mprotect(addr: usize, len: usize, prot: usize) -> SysResult {
// FIXME: properly set the attribute of the area // FIXME: properly set the attribute of the area
// now some mut ptr checks are faulty // now some mut ptr checks are faulty
let memory_area = proc.vm.iter().find(|area| area.is_overlap_with(addr, addr + len)); let memory_area = proc
.vm
.iter()
.find(|area| area.is_overlap_with(addr, addr + len));
if memory_area.is_none() { if memory_area.is_none() {
return Err(SysError::ENOMEM); return Err(SysError::ENOMEM);
} }

@ -1,7 +1,7 @@
use super::*; use super::*;
use crate::arch::cpu;
use core::mem::size_of; use core::mem::size_of;
use core::sync::atomic::{AtomicI32, Ordering}; use core::sync::atomic::{AtomicI32, Ordering};
use crate::arch::cpu;
pub fn sys_arch_prctl(code: i32, addr: usize, tf: &mut TrapFrame) -> SysResult { pub fn sys_arch_prctl(code: i32, addr: usize, tf: &mut TrapFrame) -> SysResult {
const ARCH_SET_FS: i32 = 0x1002; const ARCH_SET_FS: i32 = 0x1002;
@ -22,8 +22,7 @@ pub fn sys_uname(buf: *mut u8) -> SysResult {
let offset = 65; let offset = 65;
let strings = ["rCore", "orz", "0.1.0", "1", "machine", "domain"]; let strings = ["rCore", "orz", "0.1.0", "1", "machine", "domain"];
let proc = process(); let proc = process();
proc.vm proc.vm.check_write_array(buf, strings.len() * offset)?;
.check_write_array(buf, strings.len() * offset)?;
for i in 0..strings.len() { for i in 0..strings.len() {
unsafe { unsafe {
@ -39,8 +38,7 @@ pub fn sys_sched_getaffinity(pid: usize, size: usize, mask: *mut u32) -> SysResu
pid, size, mask pid, size, mask
); );
let proc = process(); let proc = process();
proc.vm proc.vm.check_write_array(mask, size / size_of::<u32>())?;
.check_write_array(mask, size / size_of::<u32>())?;
// we only have at most 4 CPUs. // we only have at most 4 CPUs.
// so just set it. // so just set it.
@ -75,9 +73,7 @@ pub fn sys_futex(uaddr: usize, op: u32, val: i32, timeout: *const TimeSpec) -> S
if uaddr % size_of::<u32>() != 0 { if uaddr % size_of::<u32>() != 0 {
return Err(SysError::EINVAL); return Err(SysError::EINVAL);
} }
process() process().vm.check_write_ptr(uaddr as *mut AtomicI32)?;
.vm
.check_write_ptr(uaddr as *mut AtomicI32)?;
let atomic = unsafe { &mut *(uaddr as *mut AtomicI32) }; let atomic = unsafe { &mut *(uaddr as *mut AtomicI32) };
let _timeout = if timeout.is_null() { let _timeout = if timeout.is_null() {
None None
@ -112,7 +108,6 @@ pub fn sys_futex(uaddr: usize, op: u32, val: i32, timeout: *const TimeSpec) -> S
} }
} }
const LINUX_REBOOT_CMD_HALT: u32 = 0xcdef0123; const LINUX_REBOOT_CMD_HALT: u32 = 0xcdef0123;
pub fn sys_reboot(_magic: u32, magic2: u32, cmd: u32, _arg: *const u8) -> SysResult { pub fn sys_reboot(_magic: u32, magic2: u32, cmd: u32, _arg: *const u8) -> SysResult {
// we will skip verifying magic // we will skip verifying magic

@ -1,35 +1,35 @@
//! System call //! System call
use alloc::{string::String, sync::Arc, vec::Vec}; use alloc::{string::String, sync::Arc, vec::Vec};
use core::{slice, str, fmt}; use core::{fmt, slice, str};
use bitflags::bitflags; use bitflags::bitflags;
use rcore_memory::VMError;
use rcore_fs::vfs::{FileType, FsError, INode, Metadata}; use rcore_fs::vfs::{FileType, FsError, INode, Metadata};
use rcore_memory::VMError;
use crate::arch::cpu;
use crate::arch::interrupt::TrapFrame; use crate::arch::interrupt::TrapFrame;
use crate::sync::Condvar; use crate::arch::syscall::*;
use crate::process::*; use crate::process::*;
use crate::sync::Condvar;
use crate::thread; use crate::thread;
use crate::util; use crate::util;
use crate::arch::cpu;
use crate::arch::syscall::*;
use self::custom::*;
use self::fs::*; use self::fs::*;
use self::mem::*; use self::mem::*;
use self::misc::*;
use self::net::*;
use self::proc::*; use self::proc::*;
use self::time::*; use self::time::*;
use self::net::*;
use self::misc::*;
use self::custom::*;
mod custom;
mod fs; mod fs;
mod mem; mod mem;
mod misc;
mod net;
mod proc; mod proc;
mod time; mod time;
mod net;
mod misc;
mod custom;
/// System call dispatcher /// System call dispatcher
// This #[deny(unreachable_patterns)] checks if each match arm is defined // This #[deny(unreachable_patterns)] checks if each match arm is defined
@ -37,9 +37,7 @@ mod custom;
#[deny(unreachable_patterns)] #[deny(unreachable_patterns)]
pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize { pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
let cid = cpu::id(); let cid = cpu::id();
let pid = { let pid = { process().pid.clone() };
process().pid.clone()
};
let tid = processor().tid(); let tid = processor().tid();
if !pid.is_init() { if !pid.is_init() {
// we trust pid 0 process // we trust pid 0 process
@ -97,10 +95,24 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
SYS_SOCKET => sys_socket(args[0], args[1], args[2]), SYS_SOCKET => sys_socket(args[0], args[1], args[2]),
SYS_CONNECT => sys_connect(args[0], args[1] as *const SockAddr, args[2]), SYS_CONNECT => sys_connect(args[0], args[1] as *const SockAddr, args[2]),
SYS_ACCEPT => sys_accept(args[0], args[1] as *mut SockAddr, args[2] as *mut u32), SYS_ACCEPT => sys_accept(args[0], args[1] as *mut SockAddr, args[2] as *mut u32),
SYS_SENDTO => sys_sendto(args[0], args[1] as *const u8, args[2], args[3], args[4] as *const SockAddr, args[5]), SYS_SENDTO => sys_sendto(
SYS_RECVFROM => sys_recvfrom(args[0], args[1] as *mut u8, args[2], args[3], args[4] as *mut SockAddr, args[5] as *mut u32), args[0],
// SYS_SENDMSG => sys_sendmsg(), args[1] as *const u8,
// SYS_RECVMSG => sys_recvmsg(), args[2],
args[3],
args[4] as *const SockAddr,
args[5],
),
SYS_RECVFROM => sys_recvfrom(
args[0],
args[1] as *mut u8,
args[2],
args[3],
args[4] as *mut SockAddr,
args[5] as *mut u32,
),
// SYS_SENDMSG => sys_sendmsg(),
// SYS_RECVMSG => sys_recvmsg(),
SYS_SHUTDOWN => sys_shutdown(args[0], args[1]), SYS_SHUTDOWN => sys_shutdown(args[0], args[1]),
SYS_BIND => sys_bind(args[0], args[1] as *const SockAddr, args[2]), SYS_BIND => sys_bind(args[0], args[1] as *const SockAddr, args[2]),
// 50 // 50
@ -108,9 +120,27 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
SYS_GETSOCKNAME => sys_getsockname(args[0], args[1] as *mut SockAddr, args[2] as *mut u32), SYS_GETSOCKNAME => sys_getsockname(args[0], args[1] as *mut SockAddr, args[2] as *mut u32),
SYS_GETPEERNAME => sys_getpeername(args[0], args[1] as *mut SockAddr, args[2] as *mut u32), SYS_GETPEERNAME => sys_getpeername(args[0], args[1] as *mut SockAddr, args[2] as *mut u32),
SYS_SETSOCKOPT => sys_setsockopt(args[0], args[1], args[2], args[3] as *const u8, args[4]), SYS_SETSOCKOPT => sys_setsockopt(args[0], args[1], args[2], args[3] as *const u8, args[4]),
SYS_GETSOCKOPT => sys_getsockopt(args[0], args[1], args[2], args[3] as *mut u8, args[4] as *mut u32), SYS_GETSOCKOPT => sys_getsockopt(
SYS_CLONE => sys_clone(args[0], args[1], args[2] as *mut u32, args[3] as *mut u32, args[4], tf), args[0],
SYS_EXECVE => sys_exec(args[0] as *const u8, args[1] as *const *const u8, args[2] as *const *const u8, tf), args[1],
args[2],
args[3] as *mut u8,
args[4] as *mut u32,
),
SYS_CLONE => sys_clone(
args[0],
args[1],
args[2] as *mut u32,
args[3] as *mut u32,
args[4],
tf,
),
SYS_EXECVE => sys_exec(
args[0] as *const u8,
args[1] as *const *const u8,
args[2] as *const *const u8,
tf,
),
// 60 // 60
SYS_EXIT => sys_exit(args[0] as usize), SYS_EXIT => sys_exit(args[0] as usize),
SYS_WAIT4 => sys_wait4(args[0] as isize, args[1] as *mut i32), // TODO: wait4 SYS_WAIT4 => sys_wait4(args[0] as isize, args[1] as *mut i32), // TODO: wait4
@ -140,7 +170,7 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
Ok(0o777) Ok(0o777)
} }
SYS_GETTIMEOFDAY => sys_gettimeofday(args[0] as *mut TimeVal, args[1] as *const u8), SYS_GETTIMEOFDAY => sys_gettimeofday(args[0] as *mut TimeVal, args[1] as *const u8),
// SYS_GETRLIMIT => sys_getrlimit(), // SYS_GETRLIMIT => sys_getrlimit(),
SYS_GETRUSAGE => sys_getrusage(args[0], args[1] as *mut RUsage), SYS_GETRUSAGE => sys_getrusage(args[0], args[1] as *mut RUsage),
SYS_SYSINFO => sys_sysinfo(args[0] as *mut SysInfo), SYS_SYSINFO => sys_sysinfo(args[0] as *mut SysInfo),
SYS_GETUID => { SYS_GETUID => {
@ -182,7 +212,7 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
Err(SysError::EACCES) Err(SysError::EACCES)
} }
SYS_SETPRIORITY => sys_set_priority(args[0]), SYS_SETPRIORITY => sys_set_priority(args[0]),
// SYS_SETRLIMIT => sys_setrlimit(), // SYS_SETRLIMIT => sys_setrlimit(),
SYS_SYNC => sys_sync(), SYS_SYNC => sys_sync(),
SYS_MOUNT => { SYS_MOUNT => {
warn!("mount is unimplemented"); warn!("mount is unimplemented");
@ -192,9 +222,19 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
warn!("umount2 is unimplemented"); warn!("umount2 is unimplemented");
Err(SysError::EACCES) Err(SysError::EACCES)
} }
SYS_REBOOT => sys_reboot(args[0] as u32, args[1] as u32, args[2] as u32, args[3] as *const u8), SYS_REBOOT => sys_reboot(
args[0] as u32,
args[1] as u32,
args[2] as u32,
args[3] as *const u8,
),
SYS_GETTID => sys_gettid(), SYS_GETTID => sys_gettid(),
SYS_FUTEX => sys_futex(args[0], args[1] as u32, args[2] as i32, args[3] as *const TimeSpec), SYS_FUTEX => sys_futex(
args[0],
args[1] as u32,
args[2] as i32,
args[3] as *const TimeSpec,
),
SYS_SCHED_GETAFFINITY => sys_sched_getaffinity(args[0], args[1], args[2] as *mut u32), SYS_SCHED_GETAFFINITY => sys_sched_getaffinity(args[0], args[1], args[2] as *mut u32),
SYS_GETDENTS64 => sys_getdents64(args[0], args[1] as *mut LinuxDirent64, args[2]), SYS_GETDENTS64 => sys_getdents64(args[0], args[1] as *mut LinuxDirent64, args[2]),
SYS_SET_TID_ADDRESS => { SYS_SET_TID_ADDRESS => {
@ -205,12 +245,12 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
SYS_EXIT_GROUP => sys_exit_group(args[0]), SYS_EXIT_GROUP => sys_exit_group(args[0]),
SYS_OPENAT => sys_openat(args[0], args[1] as *const u8, args[2], args[3]), // TODO: handle `dfd` SYS_OPENAT => sys_openat(args[0], args[1] as *const u8, args[2], args[3]), // TODO: handle `dfd`
SYS_MKDIRAT => sys_mkdir(args[1] as *const u8, args[2]), // TODO: handle `dfd` SYS_MKDIRAT => sys_mkdir(args[1] as *const u8, args[2]), // TODO: handle `dfd`
// SYS_MKNODAT => sys_mknod(), // SYS_MKNODAT => sys_mknod(),
// 260 // 260
SYS_FCHOWNAT => { SYS_FCHOWNAT => {
warn!("sys_fchownat is unimplemented"); warn!("sys_fchownat is unimplemented");
Ok(0) Ok(0)
}, }
SYS_NEWFSTATAT => sys_stat(args[1] as *const u8, args[2] as *mut Stat), // TODO: handle `dfd`, `flag` SYS_NEWFSTATAT => sys_stat(args[1] as *const u8, args[2] as *mut Stat), // TODO: handle `dfd`, `flag`
SYS_UNLINKAT => sys_unlink(args[1] as *const u8), // TODO: handle `dfd`, `flag` SYS_UNLINKAT => sys_unlink(args[1] as *const u8), // TODO: handle `dfd`, `flag`
SYS_RENAMEAT => sys_renameat(args[0], args[1] as *const u8, args[2], args[3] as *const u8), // TODO: handle `olddfd`, `newdfd` SYS_RENAMEAT => sys_renameat(args[0], args[1] as *const u8, args[2], args[3] as *const u8), // TODO: handle `olddfd`, `newdfd`
@ -253,7 +293,10 @@ pub fn syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> isize {
}; };
if !pid.is_init() { if !pid.is_init() {
// we trust pid 0 process // we trust pid 0 process
debug!("{}:{}:{} syscall id {} ret with {:x?}", cid, pid, tid, id, ret); debug!(
"{}:{}:{} syscall id {} ret with {:x?}",
cid, pid, tid, id, ret
);
} }
match ret { match ret {
Ok(code) => code as isize, Ok(code) => code as isize,
@ -270,9 +313,15 @@ fn x86_64_syscall(id: usize, args: [usize; 6], tf: &mut TrapFrame) -> Option<Sys
SYS_POLL => sys_poll(args[0] as *mut PollFd, args[1], args[2]), SYS_POLL => sys_poll(args[0] as *mut PollFd, args[1], args[2]),
SYS_ACCESS => sys_access(args[0] as *const u8, args[1]), SYS_ACCESS => sys_access(args[0] as *const u8, args[1]),
SYS_PIPE => sys_pipe(args[0] as *mut u32), SYS_PIPE => sys_pipe(args[0] as *mut u32),
SYS_SELECT => sys_select(args[0], args[1] as *mut u32, args[2] as *mut u32, args[3] as *mut u32, args[4] as *const TimeVal), SYS_SELECT => sys_select(
args[0],
args[1] as *mut u32,
args[2] as *mut u32,
args[3] as *mut u32,
args[4] as *const TimeVal,
),
SYS_DUP2 => sys_dup2(args[0], args[1]), SYS_DUP2 => sys_dup2(args[0], args[1]),
// SYS_PAUSE => sys_pause(), // SYS_PAUSE => sys_pause(),
SYS_FORK => sys_fork(tf), SYS_FORK => sys_fork(tf),
// use fork for vfork // use fork for vfork
SYS_VFORK => sys_fork(tf), SYS_VFORK => sys_fork(tf),
@ -363,7 +412,9 @@ pub enum SysError {
impl fmt::Display for SysError { impl fmt::Display for SysError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::SysError::*; use self::SysError::*;
write!(f, "{}", write!(
f,
"{}",
match self { match self {
EPERM => "Operation not permitted", EPERM => "Operation not permitted",
ENOENT => "No such file or directory", ENOENT => "No such file or directory",
@ -424,7 +475,6 @@ impl From<VMError> for SysError {
} }
} }
const SPIN_WAIT_TIMES: usize = 100; const SPIN_WAIT_TIMES: usize = 100;
pub fn spin_and_wait<T>(condvars: &[&Condvar], mut action: impl FnMut() -> Option<T>) -> T { pub fn spin_and_wait<T>(condvars: &[&Condvar], mut action: impl FnMut() -> Option<T>) -> T {
@ -440,4 +490,3 @@ pub fn spin_and_wait<T>(condvars: &[&Condvar], mut action: impl FnMut() -> Optio
Condvar::wait_any(&condvars); Condvar::wait_any(&condvars);
} }
} }

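Aside from wrapping long argument lists, the dispatcher above is a single `match` on the syscall id that funnels every handler's `SysResult` into one `isize` return (negative errno on failure). A toy sketch of that shape, with made-up syscall numbers and a plain `i32` errno standing in for `SysError`:

    type SysResult = Result<usize, i32>; // Err holds an errno, standing in for SysError

    const SYS_DUMMY_WRITE: usize = 1;
    const SYS_GETPID: usize = 39;
    const ENOSYS: i32 = 38;

    fn sys_getpid() -> SysResult {
        Ok(42)
    }

    fn sys_dummy_write(_fd: usize, len: usize) -> SysResult {
        Ok(len) // pretend everything was written
    }

    fn syscall(id: usize, args: [usize; 6]) -> isize {
        let ret = match id {
            SYS_DUMMY_WRITE => sys_dummy_write(args[0], args[2]),
            SYS_GETPID => sys_getpid(),
            _ => Err(ENOSYS),
        };
        match ret {
            Ok(code) => code as isize,
            Err(errno) => -(errno as isize),
        }
    }

    fn main() {
        assert_eq!(syscall(SYS_GETPID, [0; 6]), 42);
        assert_eq!(syscall(SYS_DUMMY_WRITE, [1, 0, 5, 0, 0, 0]), 5);
        assert_eq!(syscall(999, [0; 6]), -38);
    }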
@ -16,9 +16,18 @@ pub fn sys_fork(tf: &TrapFrame) -> SysResult {
/// and thread pointer will be set to `newtls`. /// and thread pointer will be set to `newtls`.
/// The child tid will be stored at both `parent_tid` and `child_tid`. /// The child tid will be stored at both `parent_tid` and `child_tid`.
/// This is partially implemented for musl only. /// This is partially implemented for musl only.
pub fn sys_clone(flags: usize, newsp: usize, parent_tid: *mut u32, child_tid: *mut u32, newtls: usize, tf: &TrapFrame) -> SysResult { pub fn sys_clone(
info!("clone: flags: {:#x}, newsp: {:#x}, parent_tid: {:?}, child_tid: {:?}, newtls: {:#x}", flags: usize,
flags, newsp, parent_tid, child_tid, newtls); newsp: usize,
parent_tid: *mut u32,
child_tid: *mut u32,
newtls: usize,
tf: &TrapFrame,
) -> SysResult {
info!(
"clone: flags: {:#x}, newsp: {:#x}, parent_tid: {:?}, child_tid: {:?}, newtls: {:#x}",
flags, newsp, parent_tid, child_tid, newtls
);
if flags == 0x4111 { if flags == 0x4111 {
warn!("sys_clone is calling sys_fork instead, ignoring other args"); warn!("sys_clone is calling sys_fork instead, ignoring other args");
return sys_fork(tf); return sys_fork(tf);
@ -64,41 +73,61 @@ pub fn sys_wait4(pid: isize, wstatus: *mut i32) -> SysResult {
let mut proc = process(); let mut proc = process();
// check child_exit_code // check child_exit_code
let find = match target { let find = match target {
WaitFor::AnyChild => proc.child_exit_code WaitFor::AnyChild => proc
.iter().next().map(|(&pid, &code)| (pid, code)), .child_exit_code
WaitFor::Pid(pid) => proc.child_exit_code .iter()
.get(&pid).map(|&code| (pid, code)), .next()
.map(|(&pid, &code)| (pid, code)),
WaitFor::Pid(pid) => proc.child_exit_code.get(&pid).map(|&code| (pid, code)),
}; };
// if found, return // if found, return
if let Some((pid, exit_code)) = find { if let Some((pid, exit_code)) = find {
proc.child_exit_code.remove(&pid); proc.child_exit_code.remove(&pid);
if !wstatus.is_null() { if !wstatus.is_null() {
unsafe { wstatus.write(exit_code as i32); } unsafe {
wstatus.write(exit_code as i32);
}
} }
return Ok(pid); return Ok(pid);
} }
// if not, check pid // if not, check pid
let children: Vec<_> = proc.children.iter() let children: Vec<_> = proc
.children
.iter()
.filter_map(|weak| weak.upgrade()) .filter_map(|weak| weak.upgrade())
.collect(); .collect();
let invalid = match target { let invalid = match target {
WaitFor::AnyChild => children.len() == 0, WaitFor::AnyChild => children.len() == 0,
WaitFor::Pid(pid) => children.iter().find(|p| p.lock().pid.get() == pid).is_none(), WaitFor::Pid(pid) => children
.iter()
.find(|p| p.lock().pid.get() == pid)
.is_none(),
}; };
if invalid { if invalid {
return Err(SysError::ECHILD); return Err(SysError::ECHILD);
} }
info!("wait: thread {} -> {:?}, sleep", thread::current().id(), target); info!(
"wait: thread {} -> {:?}, sleep",
thread::current().id(),
target
);
let condvar = proc.child_exit.clone(); let condvar = proc.child_exit.clone();
drop(proc); // must release lock of current process drop(proc); // must release lock of current process
condvar._wait(); condvar._wait();
} }
} }
pub fn sys_exec(name: *const u8, argv: *const *const u8, envp: *const *const u8, tf: &mut TrapFrame) -> SysResult { pub fn sys_exec(
name: *const u8,
argv: *const *const u8,
envp: *const *const u8,
tf: &mut TrapFrame,
) -> SysResult {
info!("exec: name: {:?}, argv: {:?} envp: {:?}", name, argv, envp); info!("exec: name: {:?}, argv: {:?} envp: {:?}", name, argv, envp);
let proc = process(); let proc = process();
let _name = if name.is_null() { String::from("") } else { let _name = if name.is_null() {
String::from("")
} else {
unsafe { proc.vm.check_and_clone_cstr(name)? } unsafe { proc.vm.check_and_clone_cstr(name)? }
}; };
@ -129,7 +158,9 @@ pub fn sys_exec(name: *const u8, argv: *const *const u8, envp: *const *const u8,
thread.proc.lock().clone_for_exec(&proc); thread.proc.lock().clone_for_exec(&proc);
// Activate new page table // Activate new page table
unsafe { thread.proc.lock().vm.activate(); } unsafe {
thread.proc.lock().vm.activate();
}
// Modify the TrapFrame // Modify the TrapFrame
*tf = unsafe { thread.context.get_init_tf() }; *tf = unsafe { thread.context.get_init_tf() };
@ -148,7 +179,12 @@ pub fn sys_yield() -> SysResult {
/// Kill the process /// Kill the process
pub fn sys_kill(pid: usize, sig: usize) -> SysResult { pub fn sys_kill(pid: usize, sig: usize) -> SysResult {
info!("kill: {} killed: {} with sig {}", thread::current().id(), pid, sig); info!(
"kill: {} killed: {} with sig {}",
thread::current().id(),
pid,
sig
);
let current_pid = process().pid.get().clone(); let current_pid = process().pid.get().clone();
if current_pid == pid { if current_pid == pid {
// killing myself // killing myself
@ -223,7 +259,9 @@ pub fn sys_exit(exit_code: usize) -> ! {
// it has memory access so we can't move it to Thread::drop? // it has memory access so we can't move it to Thread::drop?
let clear_child_tid = current_thread().clear_child_tid; let clear_child_tid = current_thread().clear_child_tid;
if clear_child_tid != 0 { if clear_child_tid != 0 {
unsafe { (clear_child_tid as *mut u32).write(0); } unsafe {
(clear_child_tid as *mut u32).write(0);
}
let queue = process().get_futex(clear_child_tid); let queue = process().get_futex(clear_child_tid);
queue.notify_one(); queue.notify_one();
} }

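One detail in `sys_wait4` above: the children list holds `Weak` references, and `.filter_map(|weak| weak.upgrade())` keeps only the processes that are still alive. The same pattern with `std::rc` (the kernel uses `Arc`-backed process structs; the names below are illustrative):

    use std::rc::{Rc, Weak};

    fn main() {
        let alive = Rc::new("child-1");
        let gone: Weak<&str> = {
            let temp = Rc::new("child-2");
            Rc::downgrade(&temp)
        }; // `temp` is dropped here, so this Weak can no longer upgrade

        let children: Vec<Weak<&str>> = vec![Rc::downgrade(&alive), gone];
        let live: Vec<Rc<&str>> = children.iter().filter_map(|w| w.upgrade()).collect();

        assert_eq!(live.len(), 1);
        assert_eq!(*live[0], "child-1");
    }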
@ -116,7 +116,7 @@ pub fn sys_time(time: *mut u64) -> SysResult {
#[repr(C)] #[repr(C)]
pub struct RUsage { pub struct RUsage {
utime: TimeVal, utime: TimeVal,
stime: TimeVal stime: TimeVal,
} }
pub fn sys_getrusage(who: usize, rusage: *mut RUsage) -> SysResult { pub fn sys_getrusage(who: usize, rusage: *mut RUsage) -> SysResult {
@ -136,10 +136,8 @@ pub fn sys_getrusage(who: usize, rusage: *mut RUsage) -> SysResult {
stime: TimeVal { stime: TimeVal {
sec: usec / USEC_PER_SEC, sec: usec / USEC_PER_SEC,
usec: usec % USEC_PER_SEC, usec: usec % USEC_PER_SEC,
} },
};
unsafe {
*rusage = new_rusage
}; };
unsafe { *rusage = new_rusage };
Ok(0) Ok(0)
} }

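The `RUsage` hunk above only moves braces, but the sec/usec split it computes is easy to sanity-check in isolation (the constant value here is assumed to match the kernel's `USEC_PER_SEC`):

    const USEC_PER_SEC: u64 = 1_000_000;

    fn split_usec(total_usec: u64) -> (u64, u64) {
        // (seconds, remaining microseconds), as used to fill TimeVal above.
        (total_usec / USEC_PER_SEC, total_usec % USEC_PER_SEC)
    }

    fn main() {
        assert_eq!(split_usec(2_500_000), (2, 500_000));
        assert_eq!(split_usec(999_999), (0, 999_999));
    }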
@ -1,17 +1,19 @@
use crate::process::*;
use crate::arch::interrupt::TrapFrame;
use crate::arch::cpu; use crate::arch::cpu;
use crate::arch::interrupt::TrapFrame;
use crate::process::*;
use log::*; use log::*;
pub static mut TICK: usize = 0; pub static mut TICK: usize = 0;
pub fn uptime_msec() -> usize { pub fn uptime_msec() -> usize {
unsafe {crate::trap::TICK / crate::consts::USEC_PER_TICK / 1000} unsafe { crate::trap::TICK / crate::consts::USEC_PER_TICK / 1000 }
} }
pub fn timer() { pub fn timer() {
if cpu::id() == 0 { if cpu::id() == 0 {
unsafe { TICK += 1; } unsafe {
TICK += 1;
}
} }
processor().tick(); processor().tick();
} }

@ -1,9 +1,9 @@
//! ANSI escape sequences parser //! ANSI escape sequences parser
//! (ref: https://en.wikipedia.org/wiki/ANSI_escape_code) //! (ref: https://en.wikipedia.org/wiki/ANSI_escape_code)
use heapless::Vec;
use heapless::consts::U8;
use super::color::ConsoleColor; use super::color::ConsoleColor;
use heapless::consts::U8;
use heapless::Vec;
#[repr(C)] #[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq)] #[derive(Debug, Clone, Copy, PartialEq)]
@ -44,7 +44,9 @@ impl CharacterAttribute {
27 => self.reverse = false, 27 => self.reverse = false,
29 => self.strikethrough = false, 29 => self.strikethrough = false,
30...37 | 90...97 => self.foreground = ConsoleColor::from_console_code(code).unwrap(), 30...37 | 90...97 => self.foreground = ConsoleColor::from_console_code(code).unwrap(),
40...47 | 100...107 => self.background = ConsoleColor::from_console_code(code - 10).unwrap(), 40...47 | 100...107 => {
self.background = ConsoleColor::from_console_code(code - 10).unwrap()
}
_ => { /* unimplemented!() */ } _ => { /* unimplemented!() */ }
} }
} }

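For context on the ranges matched above (`30...37` foreground, `40...47` background, with the background code being the foreground code plus 10), a toy mapping with a stand-in enum for `ConsoleColor`:

    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Color {
        Black, Red, Green, Yellow, Blue, Magenta, Cyan, White,
    }

    fn from_console_code(code: u8) -> Option<Color> {
        match code {
            30 => Some(Color::Black),
            31 => Some(Color::Red),
            32 => Some(Color::Green),
            33 => Some(Color::Yellow),
            34 => Some(Color::Blue),
            35 => Some(Color::Magenta),
            36 => Some(Color::Cyan),
            37 => Some(Color::White),
            _ => None,
        }
    }

    fn main() {
        // Background code 44 maps to the same color as foreground code 34.
        assert_eq!(from_console_code(44 - 10), Some(Color::Blue));
        assert_eq!(from_console_code(99), None);
    }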
@ -3,7 +3,7 @@ pub mod escape_parser;
/// Convert C string to Rust string /// Convert C string to Rust string
pub unsafe fn from_cstr(s: *const u8) -> &'static str { pub unsafe fn from_cstr(s: *const u8) -> &'static str {
use core::{str, slice}; use core::{slice, str};
let len = (0usize..).find(|&i| *s.add(i) == 0).unwrap(); let len = (0usize..).find(|&i| *s.add(i) == 0).unwrap();
str::from_utf8(slice::from_raw_parts(s, len)).unwrap() str::from_utf8(slice::from_raw_parts(s, len)).unwrap()
} }

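The `from_cstr` helper above scans for the NUL terminator and reinterprets the bytes as UTF-8. A standalone copy with a usage example (a `'static` buffer, since the function hands back a `&'static str`):

    use std::{slice, str};

    /// Convert a NUL-terminated C string to a Rust string slice.
    unsafe fn from_cstr(s: *const u8) -> &'static str {
        let len = (0usize..).find(|&i| *s.add(i) == 0).unwrap();
        str::from_utf8(slice::from_raw_parts(s, len)).unwrap()
    }

    fn main() {
        static BUF: &[u8] = b"hello\0";
        let s = unsafe { from_cstr(BUF.as_ptr()) };
        assert_eq!(s, "hello");
    }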