Merge pull request #66 from hypocrasy/ch5

Ch5 comments
ch5
chyyuu 3 years ago committed by GitHub
commit 8c92e66f6a

@ -1,3 +1,4 @@
//! Constants used in rCore
pub const USER_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_HEAP_SIZE: usize = 0x20_0000;

@ -1,3 +1,4 @@
//! SBI console driver, for text output
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
@ -17,6 +18,7 @@ pub fn print(args: fmt::Arguments) {
}
#[macro_export]
/// print string macro
macro_rules! print {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!($fmt $(, $($arg)+)?));
@ -24,6 +26,7 @@ macro_rules! print {
}
#[macro_export]
/// println string macro
macro_rules! println {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));

@ -1,3 +1,4 @@
//! The panic handler
use crate::sbi::shutdown;
use core::panic::PanicInfo;

@ -1,13 +1,16 @@
//! Loading user applications into memory
/// Get the total number of applications.
use alloc::vec::Vec;
use lazy_static::*;
/// Get the number of apps
pub fn get_num_app() -> usize {
extern "C" {
fn _num_app();
}
unsafe { (_num_app as usize as *const usize).read_volatile() }
}
/// Get the data of the app with the given id
pub fn get_app_data(app_id: usize) -> &'static [u8] {
extern "C" {
fn _num_app();
@ -25,6 +28,7 @@ pub fn get_app_data(app_id: usize) -> &'static [u8] {
}
lazy_static! {
/// All of the apps' names
static ref APP_NAMES: Vec<&'static str> = {
let num_app = get_num_app();
extern "C" {
@ -49,13 +53,14 @@ lazy_static! {
}
#[allow(unused)]
/// Get app data by name
pub fn get_app_data_by_name(name: &str) -> Option<&'static [u8]> {
let num_app = get_num_app();
(0..num_app)
.find(|&i| APP_NAMES[i] == name)
.map(get_app_data)
}
/// List all apps
pub fn list_apps() {
println!("/**** APPS ****");
for app in APP_NAMES.iter() {

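For reference, a minimal usage sketch of the lookup above (the `initproc` name matches the call in task/mod.rs later in this diff; nothing here is new API):

// Sketch: fetch an app's embedded ELF data by name.
// get_app_data_by_name returns None when no app with that name was linked in.
if let Some(elf_data) = get_app_data_by_name("initproc") {
    println!("[kernel] initproc found, {} bytes", elf_data.len());
} else {
    println!("[kernel] initproc missing");
}
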
@ -1,3 +1,25 @@
//! The main module and entrypoint
//!
//! Various facilities of the kernels are implemented as submodules. The most
//! important ones are:
//!
//! - [`trap`]: Handles all cases of switching from userspace to the kernel
//! - [`task`]: Task management
//! - [`syscall`]: System call handling and implementation
//! - [`mm`]: Address map using SV39
//! - [`sync`]: Wrap a static data structure inside it so that we are able to access it without any `unsafe`.
//!
//! The operating system also starts in this module. Kernel code starts
//! executing from `entry.asm`, after which [`rust_main()`] is called to
//! initialize various pieces of functionality. (See its source code for
//! details.)
//!
//! We then call [`task::run_tasks()`] and for the first time go to
//! userspace.
#![deny(missing_docs)]
#![deny(warnings)]
#![no_std]
#![no_main]
#![feature(panic_info_message)]
@ -20,19 +42,19 @@ mod console;
mod config;
mod lang_items;
mod loader;
mod mm;
pub mod mm;
mod sbi;
mod sync;
mod syscall;
mod task;
pub mod sync;
pub mod syscall;
pub mod task;
mod timer;
mod trap;
pub mod trap;
use core::arch::global_asm;
global_asm!(include_str!("entry.asm"));
global_asm!(include_str!("link_app.S"));
/// clear BSS segment
fn clear_bss() {
extern "C" {
fn sbss();
@ -45,6 +67,7 @@ fn clear_bss() {
}
#[no_mangle]
/// The Rust entry point of the OS
pub fn rust_main() -> ! {
clear_bss();
println!("[kernel] Hello, world!");
@ -53,6 +76,7 @@ pub fn rust_main() -> ! {
task::add_initproc();
println!("after initproc!");
trap::init();
//trap::enable_interrupt();
trap::enable_timer_interrupt();
timer::set_next_trigger();
loader::list_apps();

@ -1,22 +1,23 @@
//! Implementation of physical and virtual address and page number.
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
/// Physical address width under SV39: 56 bits
const PA_WIDTH_SV39: usize = 56;
const VA_WIDTH_SV39: usize = 39;
const PPN_WIDTH_SV39: usize = PA_WIDTH_SV39 - PAGE_SIZE_BITS;
const VPN_WIDTH_SV39: usize = VA_WIDTH_SV39 - PAGE_SIZE_BITS;
/// Definitions
/// physical address
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct PhysAddr(pub usize);
/// virtual address
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct VirtAddr(pub usize);
/// physical page number
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct PhysPageNum(pub usize);
/// virtual page number
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct VirtPageNum(pub usize);
@ -87,17 +88,21 @@ impl From<VirtPageNum> for usize {
v.0
}
}
/// Page-level helpers for [`VirtAddr`]
impl VirtAddr {
/// Round down to the containing `VirtPageNum`
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
/// Round up to the next `VirtPageNum`
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
/// Get the page offset
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
/// Check whether the address is page-aligned
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
@ -114,15 +119,19 @@ impl From<VirtPageNum> for VirtAddr {
}
}
impl PhysAddr {
/// Round down to the containing `PhysPageNum`
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
/// Round up to the next `PhysPageNum`
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
/// Get the page offset
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
/// Check whether the address is page-aligned
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
@ -140,6 +149,7 @@ impl From<PhysPageNum> for PhysAddr {
}
impl VirtPageNum {
/// Return the three-level page-table indexes of the VPN
pub fn indexes(&self) -> [usize; 3] {
let mut vpn = self.0;
let mut idx = [0usize; 3];
@ -152,19 +162,23 @@ impl VirtPageNum {
}
impl PhysAddr {
/// Get a mutable reference to the value at this `PhysAddr`
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
/// Get the array of `PageTableEntry`s in this frame
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
/// Get the frame as a mutable byte array
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
/// Get a mutable reference to the value in this frame
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = (*self).into();
pa.get_mut()
@ -181,6 +195,7 @@ impl StepByOne for VirtPageNum {
}
#[derive(Copy, Clone)]
/// a simple range structure for type T
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
@ -213,6 +228,7 @@ where
SimpleRangeIterator::new(self.l, self.r)
}
}
/// iterator for the simple range structure
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
@ -243,4 +259,5 @@ where
}
}
}
/// a simple range structure for virtual page number
pub type VPNRange = SimpleRange<VirtPageNum>;

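A quick worked example of the floor/ceil/page_offset arithmetic documented above, assuming PAGE_SIZE == 4096 (the value itself lives in config.rs and is not part of this diff):

// Sketch: address 0x3005 sits 5 bytes into virtual page 0x3.
let va = VirtAddr(0x3005);
assert_eq!(va.floor().0, 0x3);     // 0x3005 / 0x1000
assert_eq!(va.ceil().0, 0x4);      // rounds up to the next page boundary
assert_eq!(va.page_offset(), 0x5); // low 12 bits
assert!(!va.aligned());
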
@ -1,3 +1,5 @@
//! Implementation of [`FrameAllocator`] which
//! controls all the frames in the operating system.
use super::{PhysAddr, PhysPageNum};
use crate::config::MEMORY_END;
use crate::sync::UPSafeCell;
@ -5,11 +7,14 @@ use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
/// manage a frame which has the same lifecycle as the tracker
pub struct FrameTracker {
/// physical page number of the allocated frame
pub ppn: PhysPageNum,
}
impl FrameTracker {
/// Create a `FrameTracker` for the given frame and zero the page
pub fn new(ppn: PhysPageNum) -> Self {
// page cleaning
let bytes_array = ppn.get_bytes_array();
@ -37,7 +42,7 @@ trait FrameAllocator {
fn alloc(&mut self) -> Option<PhysPageNum>;
fn dealloc(&mut self, ppn: PhysPageNum);
}
/// an implementation for frame allocator
pub struct StackFrameAllocator {
current: usize,
end: usize,
@ -83,10 +88,11 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
/// frame allocator instance through lazy_static!
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
/// Initialize the frame allocator using `ekernel` and `MEMORY_END`
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
@ -96,19 +102,20 @@ pub fn init_frame_allocator() {
PhysAddr::from(MEMORY_END).floor(),
);
}
/// allocate a frame
pub fn frame_alloc() -> Option<FrameTracker> {
FRAME_ALLOCATOR
.exclusive_access()
.alloc()
.map(FrameTracker::new)
}
/// deallocate a frame
fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]
/// a simple test for frame allocator
pub fn frame_allocator_test() {
let mut v: Vec<FrameTracker> = Vec::new();
for i in 0..5 {

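A short sketch of the RAII behaviour implied by the "same lifecycle as the tracker" comment above: dropping the `FrameTracker` is what returns the frame (via `frame_dealloc` in its `Drop` impl, which is outside this hunk):

// Sketch: the frame is recycled automatically when `frame` goes out of scope.
{
    let frame = frame_alloc().expect("out of physical frames");
    println!("[kernel] allocated ppn = {:#x}", frame.ppn.0);
} // `frame` dropped here -> the ppn goes back to FRAME_ALLOCATOR
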
@ -1,16 +1,19 @@
//! The global allocator
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
/// heap allocator instance
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
#[alloc_error_handler]
/// panic when heap allocation error occurs
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
}
/// heap space ([u8; KERNEL_HEAP_SIZE])
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
/// Initialize the heap allocator
pub fn init_heap() {
unsafe {
HEAP_ALLOCATOR

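Once `init_heap` has run, the `alloc` crate's collections work in the kernel; a minimal smoke test might look like this (hypothetical helper, not part of this PR):

// Sketch: exercise the global (buddy system) allocator after init_heap().
fn heap_smoke_test() {
    use alloc::boxed::Box;
    use alloc::vec::Vec;
    let b = Box::new(5usize);
    assert_eq!(*b, 5);
    let v: Vec<usize> = (0..10).collect();
    assert_eq!(v.iter().sum::<usize>(), 45);
}
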
@ -1,3 +1,4 @@
//! Implementation of [`MapArea`] and [`MemorySet`].
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
@ -25,22 +26,25 @@ extern "C" {
}
lazy_static! {
/// a memory set instance through lazy_static! managing kernel space
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
/// memory set structure, controls virtual-memory space
pub struct MemorySet {
page_table: PageTable,
areas: Vec<MapArea>,
}
impl MemorySet {
///Create an empty `MemorySet`
pub fn new_bare() -> Self {
Self {
page_table: PageTable::new(),
areas: Vec::new(),
}
}
/// Get the page table token (`satp` value)
pub fn token(&self) -> usize {
self.page_table.token()
}
@ -56,6 +60,7 @@ impl MemorySet {
None,
);
}
/// Remove the `MapArea` that starts with `start_vpn`
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self
.areas
@ -215,6 +220,7 @@ impl MemorySet {
elf.header.pt2.entry_point() as usize,
)
}
/// Clone an existing user `MemorySet`
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
// map trampoline
@ -234,6 +240,7 @@ impl MemorySet {
}
memory_set
}
/// Activate the address space: write `satp` and flush the TLB with `sfence.vma`
pub fn activate(&self) {
let satp = self.page_table.token();
unsafe {
@ -241,15 +248,17 @@ impl MemorySet {
asm!("sfence.vma");
}
}
/// Translate a virtual page number through the page table
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.page_table.translate(vpn)
}
/// Remove all `MapArea`s
pub fn recycle_data_pages(&mut self) {
//*self = Self::new_bare();
self.areas.clear();
}
}
/// map area structure, controls a contiguous piece of virtual memory
pub struct MapArea {
vpn_range: VPNRange,
data_frames: BTreeMap<VirtPageNum, FrameTracker>,
@ -337,21 +346,28 @@ impl MapArea {
}
#[derive(Copy, Clone, PartialEq, Debug)]
/// map type for memory set: identical or framed
pub enum MapType {
Identical,
Framed,
}
bitflags! {
/// map permission corresponding to that in pte: `R W X U`
pub struct MapPermission: u8 {
///Readable
const R = 1 << 1;
///Writable
const W = 1 << 2;
/// Executable
const X = 1 << 3;
///Accessible in U mode
const U = 1 << 4;
}
}
#[allow(unused)]
/// Check that the kernel page table is set up correctly
pub fn remap_test() {
let mut kernel_space = KERNEL_SPACE.exclusive_access();
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();

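A small sketch of how the `MapPermission` bits documented above combine; the bit values are exactly those declared in the bitflags block:

// Sketch: a user-accessible read/write mapping permission.
let perm = MapPermission::R | MapPermission::W | MapPermission::U;
assert!(perm.contains(MapPermission::U));
assert!(!perm.contains(MapPermission::X));
assert_eq!(perm.bits(), (1 << 1) | (1 << 2) | (1 << 4));
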
@ -1,3 +1,10 @@
//! Memory management implementation
//!
//! SV39 page-based virtual-memory architecture for RV64 systems, and
//! everything about memory management, like frame allocator, page table,
//! map area and memory set, is implemented here.
//!
//! Every task or process has a memory_set to control its virtual memory.
mod address;
mod frame_allocator;
mod heap_allocator;
@ -11,7 +18,7 @@ pub use memory_set::remap_test;
pub use memory_set::{MapPermission, MemorySet, KERNEL_SPACE};
pub use page_table::{translated_byte_buffer, translated_refmut, translated_str, PageTableEntry};
use page_table::{PTEFlags, PageTable};
/// Initialize the heap allocator, frame allocator and kernel space
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();

@ -1,3 +1,4 @@
//! Implementation of [`PageTableEntry`] and [`PageTable`].
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
@ -19,34 +20,44 @@ bitflags! {
#[derive(Copy, Clone)]
#[repr(C)]
/// page table entry structure
pub struct PageTableEntry {
/// The raw bits of the PTE
pub bits: usize,
}
impl PageTableEntry {
/// Create a PTE from a PPN and flags
pub fn new(ppn: PhysPageNum, flags: PTEFlags) -> Self {
PageTableEntry {
bits: ppn.0 << 10 | flags.bits as usize,
}
}
///Return an empty PTE
pub fn empty() -> Self {
PageTableEntry { bits: 0 }
}
/// Return the 44-bit PPN
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
}
/// Return the PTE flags
pub fn flags(&self) -> PTEFlags {
PTEFlags::from_bits(self.bits as u8).unwrap()
}
/// Check whether the PTE is valid
pub fn is_valid(&self) -> bool {
(self.flags() & PTEFlags::V) != PTEFlags::empty()
}
/// Check whether the PTE is readable
pub fn readable(&self) -> bool {
(self.flags() & PTEFlags::R) != PTEFlags::empty()
}
/// Check whether the PTE is writable
pub fn writable(&self) -> bool {
(self.flags() & PTEFlags::W) != PTEFlags::empty()
}
/// Check whether the PTE is executable
pub fn executable(&self) -> bool {
(self.flags() & PTEFlags::X) != PTEFlags::empty()
}
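The bit layout behind `new`, `ppn` and `flags` above, sketched; the concrete flag values (V = 1 << 0, R = 1 << 1) are assumed from the SV39 spec, since the bitflags body is outside this hunk:

// Sketch: the PPN lives in bits 10.., the flags in the low bits.
let pte = PageTableEntry::new(PhysPageNum(0x8_0000), PTEFlags::V | PTEFlags::R);
assert_eq!(pte.bits, (0x8_0000 << 10) | 0b11);
assert_eq!(pte.ppn().0, 0x8_0000);
assert!(pte.is_valid() && pte.readable() && !pte.writable());
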
@ -138,8 +149,8 @@ impl PageTable {
8usize << 60 | self.root_ppn.0
}
}
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
/// Translate a user-space pointer into a vector of mutable byte slices via the given page table token
pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&'static mut [u8]> {
let page_table = PageTable::from_token(token);
let mut start = ptr as usize;
let end = start + len;
@ -159,9 +170,9 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
start = end_va.into();
}
v
}
pub fn translated_str(token: usize, ptr: *const u8) -> String {
}
/// Translate a user-space pointer to a `\0`-terminated string into a `String` via the given page table token
pub fn translated_str(token: usize, ptr: *const u8) -> String {
let page_table = PageTable::from_token(token);
let mut string = String::new();
let mut va = ptr as usize;
@ -178,9 +189,9 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
}
}
string
}
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
}
/// Translate a user-space pointer of a generic type via the given page table token and return a mutable reference
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
//println!("into translated_refmut!");
let page_table = PageTable::from_token(token);
let va = ptr as usize;
@ -189,4 +200,4 @@ pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut()
}
}

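A hedged sketch of how a syscall can consume `translated_byte_buffer`: the buffer comes back as one mutable slice per touched page, so callers iterate over the pieces (sys_write in fs.rs uses the same pattern). `copy_from_user` is a hypothetical helper, not part of this PR:

// Sketch: copy `len` bytes from a user-space pointer into a kernel Vec.
fn copy_from_user(token: usize, ptr: *const u8, len: usize) -> alloc::vec::Vec<u8> {
    let mut out = alloc::vec::Vec::with_capacity(len);
    for piece in translated_byte_buffer(token, ptr, len) {
        out.extend_from_slice(piece); // each piece stays within one page
    }
    out
}
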
@ -1,3 +1,4 @@
//! SBI call wrappers
#![allow(unused)]
use core::arch::asm;
@ -11,7 +12,7 @@ const SBI_REMOTE_FENCE_I: usize = 5;
const SBI_REMOTE_SFENCE_VMA: usize = 6;
const SBI_REMOTE_SFENCE_VMA_ASID: usize = 7;
const SBI_SHUTDOWN: usize = 8;
/// general sbi call
#[inline(always)]
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let mut ret;
@ -26,19 +27,19 @@ fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
}
ret
}
/// use sbi call to set timer
pub fn set_timer(timer: usize) {
sbi_call(SBI_SET_TIMER, timer, 0, 0);
}
/// use sbi call to putchar in console (qemu uart handler)
pub fn console_putchar(c: usize) {
sbi_call(SBI_CONSOLE_PUTCHAR, c, 0, 0);
}
/// use sbi call to getchar from console (qemu uart handler)
pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
/// use sbi call to shutdown the kernel
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");

@ -1,3 +1,4 @@
//! Synchronization and interior mutability primitives
mod up;
pub use up::UPSafeCell;

@ -1,3 +1,4 @@
//! Uniprocessor interior mutability primitives
use core::cell::{RefCell, RefMut};
/// Wrap a static data structure inside it so that we are
@ -22,7 +23,7 @@ impl<T> UPSafeCell<T> {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
/// Exclusively access the inner data of the `UPSafeCell`. Panics if the data has already been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}

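A short usage sketch of the pattern documented above; `TASK_MANAGER` and `PROCESSOR` later in this diff are declared the same way. `COUNTER` here is a made-up example, not part of this PR:

// Sketch: a global value wrapped in UPSafeCell; exclusive_access() hands out
// a RefMut and panics if the cell is already borrowed.
use lazy_static::*;
lazy_static! {
    static ref COUNTER: UPSafeCell<usize> = unsafe { UPSafeCell::new(0) };
}
fn bump() -> usize {
    let mut c = COUNTER.exclusive_access();
    *c += 1;
    *c
}
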
@ -1,3 +1,4 @@
//! File and filesystem-related syscalls
use crate::mm::translated_byte_buffer;
use crate::sbi::console_getchar;
use crate::task::{current_user_token, suspend_current_and_run_next};

@ -1,3 +1,14 @@
//! Implementation of syscalls
//!
//! The single entry point to all system calls, [`syscall()`], is called
//! whenever userspace wishes to perform a system call using the `ecall`
//! instruction. In this case, the processor raises an 'Environment call from
//! U-mode' exception, which is handled as one of the cases in
//! [`crate::trap::trap_handler`].
//!
//! For clarity, each syscall is implemented as its own function, named
//! `sys_` followed by the name of the syscall. You can find such functions in the
//! submodules, and new syscalls should be implemented the same way.
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
@ -13,7 +24,7 @@ mod process;
use fs::*;
use process::*;
/// handle syscall exception with `syscall_id` and other arguments
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),

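Following the convention in the module doc above (`sys_` plus the syscall name, dispatched from `syscall()`), adding a call is a small change; this is a hedged sketch with a made-up syscall, not something in this PR:

// Sketch: wiring up a hypothetical sys_hello syscall.
const SYSCALL_HELLO: usize = 555; // made-up number

// added to the match inside syscall():
//     SYSCALL_HELLO => sys_hello(args[0]),

fn sys_hello(arg: usize) -> isize {
    println!("[kernel] hello, arg = {}", arg);
    0
}
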
@ -1,13 +1,19 @@
//! Implementation of [`TaskContext`]
use crate::trap::trap_return;
#[repr(C)]
/// task context structure containing some registers
pub struct TaskContext {
/// return address (e.g. `__restore`) of the `__switch` ASM function
ra: usize,
/// kernel stack pointer of app
sp: usize,
/// s0-s11 registers, callee-saved
s: [usize; 12],
}
impl TaskContext {
/// Create a zero-initialized task context
pub fn zero_init() -> Self {
Self {
ra: 0,
@ -15,6 +21,7 @@ impl TaskContext {
s: [0; 12],
}
}
/// Create a task context with `ra` set to `trap_return`, `sp` set to `kstack_ptr`, and the `s` registers zeroed
pub fn goto_trap_return(kstack_ptr: usize) -> Self {
Self {
ra: trap_return as usize,

@ -1,23 +1,27 @@
//!Implementation of [`TaskManager`]
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
/// A FIFO ready queue of `Arc<TaskControlBlock>`s
pub struct TaskManager {
ready_queue: VecDeque<Arc<TaskControlBlock>>,
}
/// A simple FIFO scheduler.
impl TaskManager {
/// Create an empty `TaskManager`
pub fn new() -> Self {
Self {
ready_queue: VecDeque::new(),
}
}
///Add a task to `TaskManager`
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
}
/// Remove the first task and return it, or `None` if the `TaskManager` is empty
pub fn fetch(&mut self) -> Option<Arc<TaskControlBlock>> {
self.ready_queue.pop_front()
}
@ -27,11 +31,11 @@ lazy_static! {
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
}
/// Add a task to the global `TASK_MANAGER`
pub fn add_task(task: Arc<TaskControlBlock>) {
TASK_MANAGER.exclusive_access().add(task);
}
/// Fetch the first task from the global `TASK_MANAGER`
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.exclusive_access().fetch()
}

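The FIFO behaviour of the ready queue above, sketched; `task_a` and `task_b` are assumed to be pre-built `Arc<TaskControlBlock>` values:

// Sketch: tasks come back out of the queue in insertion order.
let mut tm = TaskManager::new();
tm.add(task_a.clone());
tm.add(task_b.clone());
assert!(Arc::ptr_eq(&tm.fetch().unwrap(), &task_a));
assert!(Arc::ptr_eq(&tm.fetch().unwrap(), &task_b));
assert!(tm.fetch().is_none());
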
@ -1,25 +1,42 @@
//! Task management implementation
//!
//! Everything about task management, like starting and switching tasks is
//! implemented here.
//!
//! A single global instance of [`TaskManager`] called `TASK_MANAGER` controls
//! all the tasks in the whole operating system.
//!
//! A single global instance of [`Processor`] called `PROCESSOR` monitors running
//! task(s) for each core.
//!
//! A single global instance of [`PidAllocator`] called `PID_ALLOCATOR` allocates
//! PIDs for user apps.
//!
//! Be careful when you see `__switch` ASM function in `switch.S`. Control flow around this function
//! might not be what you expect.
mod context;
mod manager;
mod pid;
mod processor;
mod switch;
#[allow(clippy::module_inception)]
mod task;
use crate::loader::get_app_data_by_name;
use alloc::sync::Arc;
use lazy_static::*;
use manager::fetch_task;
pub use manager::{fetch_task, TaskManager};
use switch::__switch;
use task::{TaskControlBlock, TaskStatus};
pub use context::TaskContext;
pub use manager::add_task;
pub use pid::{pid_alloc, KernelStack, PidHandle};
pub use pid::{pid_alloc, KernelStack, PidHandle, PidAllocator};
pub use processor::{
current_task, current_trap_cx, current_user_token, run_tasks, schedule, take_current_task,
current_task, current_trap_cx, current_user_token, run_tasks, schedule, take_current_task, Processor
};
/// Suspend the current 'Running' task and run the next task in task list.
pub fn suspend_current_and_run_next() {
// There must be an application running.
let task = take_current_task().unwrap();
@ -37,7 +54,7 @@ pub fn suspend_current_and_run_next() {
// jump to scheduling cycle
schedule(task_cx_ptr);
}
/// Exit the current 'Running' task and run the next task in task list.
pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor
let task = take_current_task().unwrap();
@ -72,11 +89,12 @@ pub fn exit_current_and_run_next(exit_code: i32) {
}
lazy_static! {
/// Global init process that launches the user shell
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new(TaskControlBlock::new(
get_app_data_by_name("initproc").unwrap()
));
}
///Add init process to the manager
pub fn add_initproc() {
add_task(INITPROC.clone());
}

@ -1,21 +1,24 @@
//!Implementation of [`PidAllocator`]
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE};
use crate::mm::{MapPermission, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
struct PidAllocator {
///Pid Allocator struct
pub struct PidAllocator {
current: usize,
recycled: Vec<usize>,
}
impl PidAllocator {
///Create an empty `PidAllocator`
pub fn new() -> Self {
PidAllocator {
current: 0,
recycled: Vec::new(),
}
}
///Allocate a pid
pub fn alloc(&mut self) -> PidHandle {
if let Some(pid) = self.recycled.pop() {
PidHandle(pid)
@ -24,6 +27,7 @@ impl PidAllocator {
PidHandle(self.current - 1)
}
}
///Recycle a pid
pub fn dealloc(&mut self, pid: usize) {
assert!(pid < self.current);
assert!(
@ -36,10 +40,10 @@ impl PidAllocator {
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
pub static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
unsafe { UPSafeCell::new(PidAllocator::new()) };
}
///Bind pid lifetime to `PidHandle`
pub struct PidHandle(pub usize);
impl Drop for PidHandle {
@ -48,7 +52,7 @@ impl Drop for PidHandle {
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
}
}
///Allocate a pid from PID_ALLOCATOR
pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.exclusive_access().alloc()
}
@ -59,12 +63,13 @@ pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
/// Kernel stack for an app
pub struct KernelStack {
pid: usize,
}
impl KernelStack {
/// Create a kernel stack for the given pid
pub fn new(pid_handle: &PidHandle) -> Self {
let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
@ -76,6 +81,7 @@ impl KernelStack {
KernelStack { pid: pid_handle.0 }
}
#[allow(unused)]
/// Push a value onto the top of the kernel stack
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
@ -87,6 +93,7 @@ impl KernelStack {
}
ptr_mut
}
/// Get the address of the top of the kernel stack
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
kernel_stack_top

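A worked sketch of the recycling behaviour of `alloc`/`dealloc` above: dropping a `PidHandle` recycles its pid (via the `Drop` impl shown in this file), and recycled pids are reused before `current` grows.

// Sketch: pid values shown are illustrative and depend on prior allocations.
let p0 = pid_alloc();       // e.g. PidHandle(0)
{
    let p1 = pid_alloc();   // e.g. PidHandle(1)
}                            // p1 dropped -> pid 1 pushed onto `recycled`
let p2 = pid_alloc();       // reuses pid 1 instead of advancing `current` to 2
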
@ -1,3 +1,4 @@
//! Implementation of [`Processor`] and the intersection of control flows
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{TaskContext, TaskControlBlock};
@ -5,25 +6,31 @@ use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
///Processor management structure
pub struct Processor {
///The task currently executing on the current processor
current: Option<Arc<TaskControlBlock>>,
///The basic control flow of each core, helping to select and switch process
idle_task_cx: TaskContext,
}
impl Processor {
///Create an empty Processor
pub fn new() -> Self {
Self {
current: None,
idle_task_cx: TaskContext::zero_init(),
}
}
/// Get a mutable pointer to `idle_task_cx`
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
&mut self.idle_task_cx as *mut _
}
/// Take the current task, moving it out and leaving `None` in its place
pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
self.current.take()
}
/// Get a cloned `Arc` of the current task
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
self.current.as_ref().map(Arc::clone)
}
@ -32,7 +39,8 @@ impl Processor {
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
/// The main part of process execution and scheduling:
/// loop on `fetch_task` to get the process that needs to run, and switch to it through `__switch`
pub fn run_tasks() {
loop {
let mut processor = PROCESSOR.exclusive_access();
@ -53,28 +61,28 @@ pub fn run_tasks() {
}
}
}
/// Take the current task, leaving `None` in its place
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().take_current()
}
/// Get a copy of the currently running task
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().current()
}
///Get token of the address space of current task
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
let token = task.inner_exclusive_access().get_user_token();
token
}
///Get the mutable reference to trap context of current task
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
///Return to idle control flow for new scheduling
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();

@ -1,3 +1,4 @@
//!Wrap `switch.S` as a function
use super::TaskContext;
use core::arch::global_asm;

@ -1,3 +1,4 @@
//!Implementation of [`TaskControlBlock`]
use super::TaskContext;
use super::{pid_alloc, KernelStack, PidHandle};
use crate::config::TRAP_CONTEXT;

@ -1,18 +1,21 @@
//! RISC-V timer-related functionality
use crate::config::CLOCK_FREQ;
use crate::sbi::set_timer;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
/// Get the current time from the `time` CSR
pub fn get_time() -> usize {
time::read()
}
/// Get the current time in milliseconds
pub fn get_time_ms() -> usize {
time::read() / (CLOCK_FREQ / MSEC_PER_SEC)
}
/// set the next timer interrupt
pub fn set_next_trigger() {
set_timer(get_time() + CLOCK_FREQ / TICKS_PER_SEC);
}

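To make the conversion above concrete, with an assumed CLOCK_FREQ of 12_500_000 Hz (the real value lives in config.rs and is not part of this diff):

// Sketch: worked arithmetic for get_time_ms() and set_next_trigger().
const CLOCK_FREQ: usize = 12_500_000;      // assumption for this example only
let raw_ticks = 25_000_000usize;
let ms = raw_ticks / (CLOCK_FREQ / 1000);  // 25_000_000 / 12_500 = 2_000 ms
let slice = CLOCK_FREQ / 100;              // 125_000 ticks per 10 ms time slice
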
@ -1,19 +1,29 @@
//! Implementation of [`TrapContext`]
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
/// Trap context structure containing sstatus, sepc and the general registers
pub struct TrapContext {
/// general registers x0..x31
pub x: [usize; 32],
/// CSR sstatus
pub sstatus: Sstatus,
/// CSR sepc
pub sepc: usize,
/// satp token of the kernel page table
pub kernel_satp: usize,
/// Kernel stack pointer of the app
pub kernel_sp: usize,
/// Addr of trap_handler function
pub trap_handler: usize,
}
impl TrapContext {
/// Set the stack pointer (register x2 / sp)
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
/// Initialize the trap context of an app
pub fn app_init_context(
entry: usize,
sp: usize,

@ -1,3 +1,16 @@
//! Trap handling functionality
//!
//! For rCore, we have a single trap entry point, namely `__alltraps`. At
//! initialization in [`init()`], we set the `stvec` CSR to point to it.
//!
//! All traps go through `__alltraps`, which is defined in `trap.S`. The
//! assembly language code does just enough work to restore the kernel-space
//! context, ensuring that Rust code safely runs, and transfers control to
//! [`trap_handler()`].
//!
//! It then calls different functionality based on what exactly the exception
//! was. For example, timer interrupts trigger task preemption, and syscalls go
//! to [`syscall()`].
mod context;
use crate::config::{TRAMPOLINE, TRAP_CONTEXT};
@ -14,7 +27,7 @@ use riscv::register::{
};
global_asm!(include_str!("trap.S"));
/// initialize CSR `stvec` as the entry of `__alltraps`
pub fn init() {
set_kernel_trap_entry();
}
@ -30,7 +43,7 @@ fn set_user_trap_entry() {
stvec::write(TRAMPOLINE as usize, TrapMode::Direct);
}
}
/// enable timer interrupt in sie CSR
pub fn enable_timer_interrupt() {
unsafe {
sie::set_stimer();
@ -38,6 +51,7 @@ pub fn enable_timer_interrupt() {
}
#[no_mangle]
/// handle an interrupt, exception, or system call from user space
pub fn trap_handler() -> ! {
set_kernel_trap_entry();
let scause = scause::read();
@ -89,6 +103,9 @@ pub fn trap_handler() -> ! {
}
#[no_mangle]
/// Set the new address of the `__restore` asm function in the TRAMPOLINE page,
/// set register a0 = trap_cx_ptr and register a1 = physical address of the user page table,
/// and finally jump to the new address of `__restore`
pub fn trap_return() -> ! {
set_user_trap_entry();
let trap_cx_ptr = TRAP_CONTEXT;
@ -111,6 +128,8 @@ pub fn trap_return() -> ! {
}
#[no_mangle]
/// Unimplemented: traps/interrupts/exceptions from kernel mode
/// Todo: Chapter 9: I/O device
pub fn trap_from_kernel() -> ! {
panic!("a trap {:?} from kernel!", scause::read().cause());
}
