Debugging sys_exec :(

ch8-dev
Yifan Wu 4 years ago
parent 6693de9611
commit c599a31dd0

@ -8,7 +8,7 @@ pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT: usize = TRAMPOLINE - PAGE_SIZE;
pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
#[cfg(feature = "board_k210")]
pub const CLOCK_FREQ: usize = 403000000 / 62;
@ -39,4 +39,4 @@ pub const MMIO: &[(usize, usize)] = &[
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];
];

@ -1,5 +1,6 @@
use core::panic::PanicInfo;
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
@ -8,5 +9,19 @@ fn panic(info: &PanicInfo) -> ! {
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
unsafe { backtrace(); }
shutdown()
}
/// Walk the kernel stack frame-by-frame and print up to 10 return addresses.
///
/// Reads the current frame pointer from register `s0` (the frame pointer in
/// the standard RISC-V calling convention) and follows the saved-fp chain
/// until it reaches the top of the current kernel stack.
///
/// # Safety
/// Dereferences raw addresses derived from `fp`. Only sound if the kernel is
/// built with frame pointers enabled and the stack is intact — otherwise the
/// loads at `fp-8` / `fp-16` may read garbage or fault.
/// NOTE(review): assumes the standard frame layout (ra at fp-8, previous fp
/// at fp-16) — confirm against the build's codegen options.
unsafe fn backtrace() {
let mut fp: usize;
// Stop condition: walking past this address would leave our kernel stack.
let stop = current_kstack_top();
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
// Hard cap of 10 frames so a malformed fp chain cannot loop forever.
for i in 0..10 {
if fp == stop { break; }
// Saved return address lives at fp-8; saved caller fp at fp-16.
println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
fp = *((fp-16) as *const usize);
}
println!("---END BACKTRACE---");
}

@ -52,4 +52,4 @@ pub fn rust_main() -> ! {
task::add_initproc();
task::run_tasks();
panic!("Unreachable in rust_main!");
}
}

@ -12,8 +12,6 @@ use crate::config::{
MEMORY_END,
PAGE_SIZE,
TRAMPOLINE,
TRAP_CONTEXT,
USER_STACK_SIZE,
MMIO,
};
@ -142,8 +140,8 @@ impl MemorySet {
}
memory_set
}
/// Include sections in elf and trampoline and TrapContext and user stack,
/// also returns user_sp and entry point.
/// Include sections in elf and trampoline,
/// also returns user_sp_base and entry point.
pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
let mut memory_set = Self::new_bare();
// map trampoline
@ -178,10 +176,10 @@ impl MemorySet {
);
}
}
// map user stack with U flags
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_bottom: usize = max_end_va.into();
(memory_set, user_stack_bottom, elf.header.pt2.entry_point() as usize)
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
(memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();

@ -252,4 +252,4 @@ impl Iterator for UserBufferIterator {
Some(r)
}
}
}
}

@ -4,14 +4,14 @@ use crate::mm::{
translated_refmut,
translated_str,
};
use crate::task::{current_user_token, current_task};
use crate::task::{current_user_token, current_process};
use crate::fs::{make_pipe, OpenFlags, open_file};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let task = current_task().unwrap();
let inner = task.inner_exclusive_access();
let process = current_process();
let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -32,8 +32,8 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let task = current_task().unwrap();
let inner = task.inner_exclusive_access();
let process = current_process();
let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -53,14 +53,14 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
pub fn sys_open(path: *const u8, flags: u32) -> isize {
let task = current_task().unwrap();
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(
path.as_str(),
OpenFlags::from_bits(flags).unwrap()
) {
let mut inner = task.inner_exclusive_access();
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);
fd as isize
@ -70,8 +70,8 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
}
pub fn sys_close(fd: usize) -> isize {
let task = current_task().unwrap();
let mut inner = task.inner_exclusive_access();
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -83,9 +83,9 @@ pub fn sys_close(fd: usize) -> isize {
}
pub fn sys_pipe(pipe: *mut usize) -> isize {
let task = current_task().unwrap();
let process = current_process();
let token = current_user_token();
let mut inner = task.inner_exclusive_access();
let mut inner = process.inner_exclusive_access();
let (pipe_read, pipe_write) = make_pipe();
let read_fd = inner.alloc_fd();
inner.fd_table[read_fd] = Some(pipe_read);
@ -97,8 +97,8 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
}
pub fn sys_dup(fd: usize) -> isize {
let task = current_task().unwrap();
let mut inner = task.inner_exclusive_access();
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -108,4 +108,4 @@ pub fn sys_dup(fd: usize) -> isize {
let new_fd = inner.alloc_fd();
inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap()));
new_fd as isize
}
}

@ -2,8 +2,8 @@ use crate::task::{
suspend_current_and_run_next,
exit_current_and_run_next,
current_task,
current_process,
current_user_token,
add_task,
};
use crate::timer::get_time_ms;
use crate::mm::{
@ -34,20 +34,20 @@ pub fn sys_get_time() -> isize {
}
pub fn sys_getpid() -> isize {
current_task().unwrap().pid.0 as isize
current_task().unwrap().process.upgrade().unwrap().getpid() as isize
}
pub fn sys_fork() -> isize {
let current_task = current_task().unwrap();
let new_task = current_task.fork();
let new_pid = new_task.pid.0;
let current_process = current_process();
let new_process = current_process.fork();
let new_pid = new_process.getpid();
// modify trap context of new_task, because it returns immediately after switching
let trap_cx = new_task.inner_exclusive_access().get_trap_cx();
let new_process_inner = new_process.inner_exclusive_access();
let task = new_process_inner.tasks[0].as_ref().unwrap();
let trap_cx = task.inner_exclusive_access().get_trap_cx();
// we do not have to move to next instruction since we have done it before
// for child process, fork returns 0
trap_cx.x[10] = 0;
// add new task to scheduler
add_task(new_task);
new_pid as isize
}
@ -65,9 +65,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
let task = current_task().unwrap();
let process = current_process();
let argc = args_vec.len();
task.exec(all_data.as_slice(), args_vec);
process.exec(all_data.as_slice(), args_vec);
// return argc because cx.x[10] will be covered with it later
argc as isize
} else {
@ -78,11 +78,10 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
/// If there is not a child process whose pid is same as given, return -1.
/// Else if there is a child process but it is still running, return -2.
pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
let task = current_task().unwrap();
let process = current_process();
// find a child process
// ---- access current PCB exclusively
let mut inner = task.inner_exclusive_access();
let mut inner = process.inner_exclusive_access();
if inner.children
.iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()})
@ -95,7 +94,7 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
.enumerate()
.find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie() && (pid == -1 || pid as usize == p.getpid())
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
@ -112,4 +111,4 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
-2
}
// ---- release current PCB automatically
}
}

@ -1,12 +1,8 @@
use alloc::{vec::Vec, sync::Arc};
use lazy_static::*;
use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
use crate::config::{
PAGE_SIZE,
TRAMPOLINE,
KERNEL_STACK_SIZE,
};
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use super::ProcessControlBlock;
pub struct RecycleAllocator {
@ -110,17 +106,104 @@ impl KernelStack {
pub struct TaskUserRes {
pub tid: usize,
pub ustack_base: usize,
pub kstack: KernelStack,
pub process: Arc<ProcessControlBlock>,
}
impl Drop for TaskUserRes {
fn drop(&mut self) {
/// Bottom virtual address of thread `tid`'s trap-context page.
/// Trap contexts are laid out downward from `TRAP_CONTEXT_BASE`, one page
/// per thread (tid 0 occupies the page ending at `TRAP_CONTEXT_BASE`).
fn trap_cx_bottom_from_tid(tid: usize) -> usize {
TRAP_CONTEXT_BASE - tid * PAGE_SIZE
}
/// Bottom virtual address of thread `tid`'s user stack.
/// Stacks are laid out upward from `ustack_base`; each thread's slot spans
/// `USER_STACK_SIZE` plus one extra `PAGE_SIZE` — presumably an unmapped
/// guard page between adjacent stacks (TODO confirm against the mapper).
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}
impl TaskUserRes {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let tid = process.inner_exclusive_access().alloc_tid();
let kstack = kstack_alloc();
let task_user_res = Self {
tid,
ustack_base,
kstack,
process: Arc::clone(&process),
};
if alloc_user_res {
task_user_res.alloc_user_res();
}
task_user_res
}
pub fn alloc_user_res(&self) {
let mut process = self.process.inner_exclusive_access();
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process
.memory_set
.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process
.memory_set
.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
pub fn dealloc_tid(&self) {
let mut process = self.process.inner_exclusive_access();
process.dealloc_tid(self.tid);
}
fn dealloc_user_res(&self) {
// dealloc tid
let mut process = self.process.inner_exclusive_access();
process.dealloc_tid(self.tid);
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
pub fn trap_cx_user_va(&self) -> usize {
trap_cx_bottom_from_tid(self.tid)
}
pub fn trap_cx_ppn(&self) -> PhysPageNum {
let process = self.process.inner_exclusive_access();
process.task_res_allocator.dealloc(self.tid);
// dealloc trap_cx
process.dealloc_trap_cx(self.tid);
// kstack can be deallocated automatically
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
}
pub fn ustack_base(&self) -> usize { self.ustack_base }
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
pub fn kstack_top(&self) -> usize {
self.kstack.get_top()
}
}
impl Drop for TaskUserRes {
fn drop(&mut self) {
self.dealloc_user_res();
// kstack can also be deallocated automatically
}
}

@ -13,14 +13,16 @@ use alloc::sync::Arc;
use manager::fetch_task;
use lazy_static::*;
use process::ProcessControlBlock;
use id::RecycleAllocator;
pub use context::TaskContext;
pub use processor::{
run_tasks,
current_task,
current_process,
current_user_token,
current_trap_cx_user_va,
current_trap_cx,
current_kstack_top,
take_current_task,
schedule,
};
@ -42,7 +44,7 @@ pub fn suspend_current_and_run_next() {
// Change status to Ready
task_inner.task_status = TaskStatus::Ready;
drop(task_inner);
// ---- release current PCB
// ---- release current TCB
// push back to ready queue.
add_task(task);
@ -53,30 +55,35 @@ pub fn suspend_current_and_run_next() {
pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor
let task = take_current_task().unwrap();
// **** access current TCB exclusively
let mut inner = task.inner_exclusive_access();
// Change status to Zombie
inner.task_status = TaskStatus::Zombie;
// Record exit code
inner.exit_code = exit_code;
// do not move to its parent but under initproc
let task_exit_code = task.inner_exclusive_access().exit_code;
let tid = task.inner_exclusive_access().res.tid;
// remove thread
let process = task.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.drain(tid..tid + 1);
// if this is the main thread of the process, then we need terminate this process
if tid == 0 {
// mark this process as a zombie process
process_inner.is_zombie = true;
// record exit code of main process
process_inner.exit_code = task_exit_code;
// ++++++ access initproc TCB exclusively
{
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
{
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
}
}
// ++++++ release parent PCB
inner.children.clear();
// deallocate user space
inner.memory_set.recycle_data_pages();
drop(inner);
// **** release current PCB
// drop task manually to maintain rc correctly
process_inner.children.clear();
// deallocate user space as soon as possible
process_inner.memory_set.recycle_data_pages();
}
// maintain rc of process manually since we will break this context soon
drop(process_inner);
drop(process);
drop(task);
// we do not have to save task context
let mut _unused = TaskContext::zero_init();
@ -84,13 +91,13 @@ pub fn exit_current_and_run_next(exit_code: i32) {
}
lazy_static! {
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new({
pub static ref INITPROC: Arc<ProcessControlBlock> = {
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all();
TaskControlBlock::new(v.as_slice())
});
ProcessControlBlock::new(v.as_slice())
};
}
pub fn add_initproc() {
add_task(INITPROC.clone());
let initproc = INITPROC.clone();
}

@ -1,16 +1,17 @@
use crate::mm::{
MemorySet,
KERNEL_SPACE,
VirtAddr,
translated_refmut,
};
use crate::task::TaskContext;
use crate::task::id::TaskUserRes;
use crate::trap::{TrapContext, trap_handler};
use crate::config::TRAP_CONTEXT;
use crate::sync::UPSafeCell;
use core::cell::RefMut;
use super::id::RecycleAllocator;
use super::{TaskContext, TaskControlBlock};
use super::{PidHandle, pid_alloc, KernelStack, kstack_alloc};
use super::TaskControlBlock;
use super::{PidHandle, pid_alloc};
use super::add_task;
use alloc::sync::{Weak, Arc};
use alloc::vec;
use alloc::vec::Vec;
@ -25,14 +26,13 @@ pub struct ProcessControlBlock {
}
pub struct ProcessControlBlockInner {
pub base_size: usize,
pub is_zombie: bool,
pub memory_set: MemorySet,
pub parent: Option<Weak<ProcessControlBlock>>,
pub children: Vec<Arc<ProcessControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub tasks: Vec<Option<Weak<TaskControlBlock>>>,
pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
pub task_res_allocator: RecycleAllocator,
}
@ -40,6 +40,7 @@ impl ProcessControlBlockInner {
pub fn get_user_token(&self) -> usize {
self.memory_set.token()
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) {
@ -49,9 +50,21 @@ impl ProcessControlBlockInner {
self.fd_table.len() - 1
}
}
pub fn dealloc_trap_cx(&mut self, tid: usize) {
unimplemented!();
//self.memory_set.remove_area_with_start_vpn()
pub fn alloc_tid(&mut self) -> usize {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize){
self.task_res_allocator.dealloc(tid)
}
pub fn thread_count(&self) -> usize {
self.tasks.len()
}
pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
self.tasks[tid].as_ref().unwrap().clone()
}
}
@ -60,25 +73,15 @@ impl ProcessControlBlock {
self.inner.exclusive_access()
}
pub fn new(elf_data: &[u8]) -> Self {
pub fn new(elf_data: &[u8]) -> Arc<Self> {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
// allocate a pid
let pid_handle = pid_alloc();
let kstack = kstack_alloc();
let kernel_stack_top = kstack.get_top();
let task_control_block = Self {
let process = Arc::new(Self {
pid: pid_handle,
kernel_stack,
inner: unsafe { UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn,
base_size: user_sp,
task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
@ -91,33 +94,61 @@ impl ProcessControlBlock {
// 2 -> stderr
Some(Arc::new(Stdout)),
],
})},
};
// prepare TrapContext in user space
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
})}
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
ustack_base,
true,
));
// prepare trap_cx of main thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
let ustack_top = task_inner.res.ustack_top();
let kstack_top = task_inner.res.kstack_top();
drop(task_inner);
*trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
ustack_top,
KERNEL_SPACE.exclusive_access().token(),
kernel_stack_top,
kstack_top,
trap_handler as usize,
);
task_control_block
// add main thread to the process
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
// add main thread to scheduler
add_task(task);
process
}
pub fn exec(&self, elf_data: &[u8], args: Vec<String>) {
/// Only support processes with a single thread.
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, mut user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let new_token = memory_set.token();
// substitute memory_set
self.inner_exclusive_access().memory_set = memory_set;
// then we alloc user resource for main thread again
// since memory_set has been changed
let task = self.inner_exclusive_access().get_task(0);
let mut task_inner = task.inner_exclusive_access();
task_inner.res.dealloc_tid();
task_inner.res.ustack_base = ustack_base;
task_inner.res.alloc_user_res();
// push arguments on user stack
let mut user_sp = task_inner.res.ustack_top();
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp;
let mut argv: Vec<_> = (0..=args.len())
.map(|arg| {
translated_refmut(
memory_set.token(),
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
)
})
@ -128,86 +159,84 @@ impl ProcessControlBlock {
*argv[i] = user_sp;
let mut p = user_sp;
for c in args[i].as_bytes() {
*translated_refmut(memory_set.token(), p as *mut u8) = *c;
*translated_refmut(new_token, p as *mut u8) = *c;
p += 1;
}
*translated_refmut(memory_set.token(), p as *mut u8) = 0;
*translated_refmut(new_token, p as *mut u8) = 0;
}
// make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>();
// **** access current TCB exclusively
let mut inner = self.inner_exclusive_access();
// substitute memory_set
inner.memory_set = memory_set;
// update trap_cx ppn
inner.trap_cx_ppn = trap_cx_ppn;
// initialize trap_cx
let mut trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.exclusive_access().token(),
self.kernel_stack.get_top(),
task_inner.res.kstack_top(),
trap_handler as usize,
);
trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base;
*inner.get_trap_cx() = trap_cx;
// **** release current PCB
*task_inner.get_trap_cx() = trap_cx;
task_inner.task_cx = TaskContext::goto_trap_return(task_inner.res.kstack_top());
}
pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
// ---- hold parent PCB lock
let mut parent_inner = self.inner_exclusive_access();
// copy user space(include trap context)
let memory_set = MemorySet::from_existed_user(
&parent_inner.memory_set
);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
/// Only support processes with a single thread.
pub fn fork(self: &Arc<Self>) -> Arc<Self> {
let mut parent = self.inner_exclusive_access();
assert_eq!(parent.thread_count(), 1);
// clone parent's memory_set completely including trampoline/ustacks/trap_cxs
let memory_set = MemorySet::from_existed_user(&parent.memory_set);
// alloc a pid
let pid = pid_alloc();
// copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent_inner.fd_table.iter() {
for fd in parent.fd_table.iter() {
if let Some(file) = fd {
new_fd_table.push(Some(file.clone()));
} else {
new_fd_table.push(None);
}
}
let task_control_block = Arc::new(TaskControlBlock {
pid: pid_handle,
kernel_stack,
inner: unsafe { UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn,
base_size: parent_inner.base_size,
task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
})},
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
})}
});
// add child
parent_inner.children.push(task_control_block.clone());
// modify kernel_sp in trap_cx
// **** access child PCB exclusively
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
trap_cx.kernel_sp = kernel_stack_top;
// return
task_control_block
// **** release child PCB
// ---- release parent PCB
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent.get_task(0).inner_exclusive_access().res.ustack_base(),
// here we do not allocate trap_cx or ustack again
// but mention that we allocate a new kstack here
false,
));
// attach task to child process
let mut child_inner = child.inner_exclusive_access();
child_inner.tasks.push(Some(Arc::clone(&task)));
drop(child_inner);
// modify kstack_top in trap_cx of this thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task_inner.res.kstack_top();
drop(task_inner);
// add this thread to scheduler
add_task(task);
child
}
pub fn getpid(&self) -> usize {
self.pid.0
}
}

@ -1,4 +1,4 @@
use super::{TaskContext, TaskControlBlock};
use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use alloc::sync::Arc;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
@ -55,6 +55,8 @@ pub fn run_tasks() {
next_task_cx_ptr,
);
}
} else {
println!("no tasks available in run_tasks");
}
}
}
@ -67,9 +69,13 @@ pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().current()
}
/// The process that owns the currently running task.
///
/// Panics if no task is currently running, or if the owning process has
/// already been dropped (the task holds only a `Weak` back-reference).
pub fn current_process() -> Arc<ProcessControlBlock> {
current_task().unwrap().process.upgrade().unwrap()
}
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
let token = task.inner_exclusive_access().get_user_token();
let token = task.get_user_token();
token
}
@ -77,6 +83,14 @@ pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().inner_exclusive_access().get_trap_cx()
}
/// User-space virtual address of the current task's trap-context page
/// (per-thread now that trap contexts are allocated via `TaskUserRes`).
/// Panics if no task is currently running.
pub fn current_trap_cx_user_va() -> usize {
current_task().unwrap().inner_exclusive_access().res.trap_cx_user_va()
}
/// Top address of the kernel stack of the currently running task.
/// Panics if no task is currently running.
pub fn current_kstack_top() -> usize {
current_task().unwrap().inner_exclusive_access().res.kstack_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();

@ -1,22 +1,35 @@
use alloc::sync::Arc;
use alloc::sync::{Arc, Weak};
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use crate::trap::TrapContext;
use super::id::TaskUserRes;
use super::{
KernelStack,
ProcessControlBlock,
TaskContext
};
use core::cell::RefMut;
pub struct TaskControlBlock {
// immutable
pub tid: usize,
pub kstack: KernelStack,
pub process: Arc<ProcessControlBlock>,
pub process: Weak<ProcessControlBlock>,
// mutable
inner: UPSafeCell<TaskControlBlockInner>,
}
impl TaskControlBlock {
/// Exclusively borrow the mutable part of this TCB.
/// Panics (via `UPSafeCell`) if the inner data is already borrowed.
pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
self.inner.exclusive_access()
}
/// satp token of the owning process's address space.
/// Panics if the owning process has already been dropped
/// (`self.process` is only a `Weak` reference).
pub fn get_user_token(&self) -> usize {
let process = self.process.upgrade().unwrap();
let inner = process.inner_exclusive_access();
inner.memory_set.token()
}
}
pub struct TaskControlBlockInner {
pub res: TaskUserRes,
pub trap_cx_ppn: PhysPageNum,
pub task_cx: TaskContext,
pub task_status: TaskStatus,
@ -27,11 +40,37 @@ impl TaskControlBlockInner {
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
fn get_status(&self) -> TaskStatus {
self.task_status
}
}
impl TaskControlBlock {
/// Create a new thread belonging to `process`.
///
/// `ustack_base` is the base address from which this thread's user-stack
/// slot is computed; when `alloc_user_res` is true the user stack and
/// trap-context page are mapped immediately by `TaskUserRes::new`
/// (callers that already have those mappings — e.g. a forked address
/// space — pass false).
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool
) -> Self {
// Allocates the tid (and, optionally, user stack + trap context).
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
let kstack_top = res.kstack_top();
Self {
// Weak back-reference: the process owns its tasks, not vice versa,
// so this must not keep the process alive.
process: Arc::downgrade(&process),
inner: unsafe { UPSafeCell::new(
TaskControlBlockInner {
res,
trap_cx_ppn,
// First switch into this task jumps to trap_return on its kstack.
task_cx: TaskContext::goto_trap_return(kstack_top),
task_status: TaskStatus::Ready,
exit_code: 0,
}
)},
}
}
}
#[derive(Copy, Clone, PartialEq)]
pub enum TaskStatus {
Ready,

@ -18,9 +18,10 @@ use crate::task::{
suspend_current_and_run_next,
current_user_token,
current_trap_cx,
current_trap_cx_user_va,
};
use crate::timer::set_next_trigger;
use crate::config::{TRAP_CONTEXT, TRAMPOLINE};
use crate::config::TRAMPOLINE;
global_asm!(include_str!("trap.S"));
@ -46,6 +47,7 @@ pub fn enable_timer_interrupt() {
#[no_mangle]
pub fn trap_handler() -> ! {
println!("into trap!");
set_kernel_trap_entry();
let scause = scause::read();
let stval = stval::read();
@ -53,6 +55,7 @@ pub fn trap_handler() -> ! {
Trap::Exception(Exception::UserEnvCall) => {
// jump to next instruction anyway
let mut cx = current_trap_cx();
println!("syscall #{}", cx.x[17]);
cx.sepc += 4;
// get system call return value
let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12]]);
@ -88,15 +91,16 @@ pub fn trap_handler() -> ! {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
}
}
//println!("before trap_return");
trap_return();
}
#[no_mangle]
pub fn trap_return() -> ! {
println!("into trap_return!");
set_user_trap_entry();
let trap_cx_ptr = TRAP_CONTEXT;
let trap_cx_user_va = current_trap_cx_user_va();
let user_satp = current_user_token();
println!("trap_cx = {:#x}, user_satp = {:#x}", trap_cx_user_va, user_satp);
extern "C" {
fn __alltraps();
fn __restore();
@ -107,7 +111,7 @@ pub fn trap_return() -> ! {
"fence.i",
"jr {restore_va}",
restore_va = in(reg) restore_va,
in("a0") trap_cx_ptr,
in("a0") trap_cx_user_va,
in("a1") user_satp,
options(noreturn)
);

@ -13,6 +13,7 @@ use user_lib::{
#[no_mangle]
fn main() -> i32 {
println!("start initproc!");
if fork() == 0 {
exec("user_shell\0", &[0 as *const u8]);
} else {
@ -31,4 +32,4 @@ fn main() -> i32 {
}
}
0
}
}

Loading…
Cancel
Save