From 2002ddd5fa941a607e2cb783a4915f21fbbde172 Mon Sep 17 00:00:00 2001 From: chenqiuhao Date: Sat, 3 Nov 2018 21:45:03 +0800 Subject: [PATCH] move processor from wrj --- crate/bbl/src/sbi.rs | 69 +-- crate/process/Cargo.toml | 1 + crate/process/src/lib.rs | 11 +- crate/process/src/process_manager.rs | 196 +++++++++ crate/process/src/processor.rs | 439 +++----------------- crate/process/src/scheduler.rs | 76 ++-- crate/process/src/thread.rs | 260 +++++------- crate/riscv | 2 +- kernel/Cargo.lock | 1 + kernel/src/arch/riscv32/boot/entry.asm | 11 +- kernel/src/arch/riscv32/boot/trap.asm | 7 +- kernel/src/arch/riscv32/consts.rs | 14 + kernel/src/arch/riscv32/context.rs | 30 +- kernel/src/arch/riscv32/cpu.rs | 36 ++ kernel/src/arch/riscv32/interrupt.rs | 7 +- kernel/src/arch/riscv32/mod.rs | 30 +- kernel/src/arch/riscv32/paging.rs | 27 +- kernel/src/arch/x86_64/consts.rs | 97 +++++ kernel/src/arch/x86_64/cpu.rs | 22 + kernel/src/arch/x86_64/driver/ide.rs | 329 ++++----------- kernel/src/arch/x86_64/driver/mod.rs | 14 +- kernel/src/arch/x86_64/gdt.rs | 5 +- kernel/src/arch/x86_64/interrupt/handler.rs | 9 +- kernel/src/arch/x86_64/interrupt/mod.rs | 16 +- kernel/src/arch/x86_64/memory.rs | 13 +- kernel/src/arch/x86_64/mod.rs | 37 +- kernel/src/arch/x86_64/paging.rs | 23 +- kernel/src/consts.rs | 129 +----- kernel/src/fs.rs | 5 +- kernel/src/lang.rs | 3 +- kernel/src/lib.rs | 27 +- kernel/src/memory.rs | 25 +- kernel/src/process/context.rs | 112 +++-- kernel/src/process/mod.rs | 119 ++++-- kernel/src/smp.rs | 4 + kernel/src/sync/condvar.rs | 6 +- kernel/src/syscall.rs | 66 ++- kernel/src/trap.rs | 40 +- 38 files changed, 1061 insertions(+), 1257 deletions(-) create mode 100644 crate/process/src/process_manager.rs create mode 100644 kernel/src/arch/riscv32/consts.rs create mode 100644 kernel/src/arch/riscv32/cpu.rs create mode 100644 kernel/src/arch/x86_64/consts.rs create mode 100644 kernel/src/smp.rs diff --git a/crate/bbl/src/sbi.rs b/crate/bbl/src/sbi.rs index 87007cd..3942fc2 100644 --- a/crate/bbl/src/sbi.rs +++ b/crate/bbl/src/sbi.rs @@ -1,13 +1,5 @@ //! Port from sbi.h -//! -//! 
This code is used for OS to use hardware outside with calling these implements -/* -** @brief translate implement calling message to RISCV asm -** @param which: usize ecall type -** arg0, arg1, arg2: usize ecall args -** @retval ret: usize the result of the asm -*/ #[inline(always)] fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize { let ret; @@ -21,38 +13,18 @@ fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize { ret } -/* -** @brief output char to console -** @param ch: usize the char to output to console -** @retval none -*/ pub fn console_putchar(ch: usize) { sbi_call(SBI_CONSOLE_PUTCHAR, ch, 0, 0); } -/* -** @brief input char from console -** @param none -** @retval ch: usize the char get from console -*/ pub fn console_getchar() -> usize { sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0) } -/* -** @brief call this function to shutdown -** @param none -** @retval none -*/ pub fn shutdown() { sbi_call(SBI_SHUTDOWN, 0, 0, 0); } -/* -** @brief set a timer when running -** @param stime_value: u64 time to be set -** @retval none -*/ pub fn set_timer(stime_value: u64) { #[cfg(target_pointer_width = "32")] sbi_call(SBI_SET_TIMER, stime_value as usize, (stime_value >> 32) as usize, 0); @@ -60,49 +32,24 @@ pub fn set_timer(stime_value: u64) { sbi_call(SBI_SET_TIMER, stime_value as usize, 0, 0); } -/* -** @brief clear the ipi -** @param none -** @retval none -*/ pub fn clear_ipi() { sbi_call(SBI_CLEAR_IPI, 0, 0, 0); } -/* -** @brief -** @param -** @retval none -*/ -pub fn send_ipi(hart_mask: *const usize) { - sbi_call(SBI_SEND_IPI, hart_mask as usize, 0, 0); +pub fn send_ipi(hart_mask: usize) { + sbi_call(SBI_SEND_IPI, &hart_mask as *const _ as usize, 0, 0); } -/* -** @brief -** @param -** @retval none -*/ -pub fn remote_fence_i(hart_mask: *const usize) { - sbi_call(SBI_REMOTE_FENCE_I, hart_mask as usize, 0, 0); +pub fn remote_fence_i(hart_mask: usize) { + sbi_call(SBI_REMOTE_FENCE_I, &hart_mask as *const _ as usize, 0, 0); } -/* -** @brief -** @param -** @retval none -*/ -pub fn remote_sfence_vma(hart_mask: *const usize, _start: usize, _size: usize) { - sbi_call(SBI_REMOTE_SFENCE_VMA, hart_mask as usize, 0, 0); +pub fn remote_sfence_vma(hart_mask: usize, _start: usize, _size: usize) { + sbi_call(SBI_REMOTE_SFENCE_VMA, &hart_mask as *const _ as usize, 0, 0); } -/* -** @brief -** @param -** @retval none -*/ -pub fn remote_sfence_vma_asid(hart_mask: *const usize, _start: usize, _size: usize, _asid: usize) { - sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask as usize, 0, 0); +pub fn remote_sfence_vma_asid(hart_mask: usize, _start: usize, _size: usize, _asid: usize) { + sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, &hart_mask as *const _ as usize, 0, 0); } const SBI_SET_TIMER: usize = 0; diff --git a/crate/process/Cargo.toml b/crate/process/Cargo.toml index c73e482..72a3883 100644 --- a/crate/process/Cargo.toml +++ b/crate/process/Cargo.toml @@ -5,3 +5,4 @@ authors = ["WangRunji "] [dependencies] log = "0.4" +spin = "0.4" \ No newline at end of file diff --git a/crate/process/src/lib.rs b/crate/process/src/lib.rs index e3d2554..07c2e2c 100644 --- a/crate/process/src/lib.rs +++ b/crate/process/src/lib.rs @@ -1,18 +1,25 @@ #![no_std] #![feature(alloc)] #![feature(const_fn)] +#![feature(linkage)] +#![feature(nll)] +#![feature(vec_resize_default)] extern crate alloc; #[macro_use] extern crate log; +extern crate spin; // To use `println!` in test #[cfg(test)] #[macro_use] extern crate std; -pub mod processor; +mod process_manager; +mod processor; pub mod scheduler; pub mod 
thread;
-mod util;
 mod event_hub;
+
+pub use process_manager::*;
+pub use processor::Processor;
diff --git a/crate/process/src/process_manager.rs b/crate/process/src/process_manager.rs
new file mode 100644
index 0000000..beca345
--- /dev/null
+++ b/crate/process/src/process_manager.rs
@@ -0,0 +1,196 @@
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use spin::Mutex;
+use scheduler::Scheduler;
+use core::cell::UnsafeCell;
+use alloc::vec::Vec;
+use event_hub::EventHub;
+
+struct Process {
+    id: Pid,
+    status: Status,
+    status_after_stop: Status,
+    context: Option<Box<Context>>,
+}
+
+pub type Pid = usize;
+type ExitCode = usize;
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum Status {
+    Ready,
+    Running(usize),
+    Sleeping,
+    Waiting(Pid),
+    /// a.k.a. ZOMBIE. Its context has been dropped.
+    Exited(ExitCode),
+}
+
+enum Event {
+    Wakeup(Pid),
+}
+
+pub trait Context {
+    unsafe fn switch_to(&mut self, target: &mut Context);
+}
+
+pub struct ProcessManager {
+    procs: Vec<Mutex<Option<Process>>>,
+    scheduler: Mutex<Box<Scheduler>>,
+    wait_queue: Vec<Mutex<Vec<Pid>>>,
+    event_hub: Mutex<EventHub<Event>>,
+}
+
+impl ProcessManager {
+    pub fn new(scheduler: Box<Scheduler>, max_proc_num: usize) -> Self {
+        ProcessManager {
+            procs: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            scheduler: Mutex::new(scheduler),
+            wait_queue: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            event_hub: Mutex::new(EventHub::new()),
+        }
+    }
+
+    fn alloc_pid(&self) -> Pid {
+        for (i, proc) in self.procs.iter().enumerate() {
+            if proc.lock().is_none() {
+                return i;
+            }
+        }
+        panic!("Process number exceeded");
+    }
+
+    /// Add a new process
+    pub fn add(&self, context: Box<Context>) -> Pid {
+        let pid = self.alloc_pid();
+        *(&self.procs[pid]).lock() = Some(Process {
+            id: pid,
+            status: Status::Ready,
+            status_after_stop: Status::Ready,
+            context: Some(context),
+        });
+        self.scheduler.lock().insert(pid);
+        pid
+    }
+
+    /// Decrease the time slice of process `pid` by one.
+    /// Return true if the time slice is used up.
+    /// Called by the timer interrupt handler.
+    pub fn tick(&self, pid: Pid) -> bool {
+        let mut event_hub = self.event_hub.lock();
+        event_hub.tick();
+        while let Some(event) = event_hub.pop() {
+            match event {
+                Event::Wakeup(pid) => self.set_status(pid, Status::Ready),
+            }
+        }
+        self.scheduler.lock().tick(pid)
+    }
+
+    /// Set the priority of process `pid`
+    pub fn set_priority(&self, pid: Pid, priority: u8) {
+        self.scheduler.lock().set_priority(pid, priority);
+    }
+
+    /// Called by a Processor to get a process to run.
+    /// The manager first marks it `Running`,
+    /// then takes out and returns its Context.
+    pub fn run(&self, cpu_id: usize) -> (Pid, Box<Context>) {
+        let mut scheduler = self.scheduler.lock();
+        let pid = scheduler.select()
+            .expect("failed to select a runnable process");
+        scheduler.remove(pid);
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = Status::Running(cpu_id);
+        (pid, proc.context.take().unwrap())
+    }
+
+    /// Called by a Processor to finish running a process
+    /// and give its context back.
+    pub fn stop(&self, pid: Pid, context: Box<Context>) {
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = proc.status_after_stop.clone();
+        proc.status_after_stop = Status::Ready;
+        proc.context = Some(context);
+        match proc.status {
+            Status::Ready => self.scheduler.lock().insert(pid),
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+    /// Switch the status of a process.
+    /// Insert/remove it into/from the scheduler if necessary.
+    fn set_status(&self, pid: Pid, status: Status) {
+        let mut scheduler = self.scheduler.lock();
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        trace!("process {} {:?} -> {:?}", pid, proc.status, status);
+        match (&proc.status, &status) {
+            (Status::Ready, Status::Ready) => return,
+            (Status::Ready, _) => scheduler.remove(pid),
+            (Status::Running(_), _) => {},
+            (Status::Exited(_), _) => panic!("cannot set status for an exited process"),
+            (Status::Waiting(target), Status::Exited(_)) =>
+                self.wait_queue[*target].lock().retain(|&i| i != pid),
+            // TODO: Sleeping -> Exited: remove the pending wakeup event.
+            (_, Status::Ready) => scheduler.insert(pid),
+            _ => {}
+        }
+        match proc.status {
+            Status::Running(_) => proc.status_after_stop = status,
+            _ => proc.status = status,
+        }
+        match proc.status {
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+    pub fn get_status(&self, pid: Pid) -> Option<Status> {
+        self.procs[pid].lock().as_ref().map(|p| p.status.clone())
+    }
+
+    pub fn remove(&self, pid: Pid) {
+        let mut proc_lock = self.procs[pid].lock();
+        let proc = proc_lock.as_ref().unwrap();
+        match proc.status {
+            Status::Exited(_) => *proc_lock = None,
+            _ => panic!("cannot remove a non-exited process"),
+        }
+    }
+
+    pub fn sleep(&self, pid: Pid, time: usize) {
+        self.set_status(pid, Status::Sleeping);
+        if time != 0 {
+            self.event_hub.lock().push(time, Event::Wakeup(pid));
+        }
+    }
+
+    pub fn wakeup(&self, pid: Pid) {
+        self.set_status(pid, Status::Ready);
+    }
+
+    pub fn wait(&self, pid: Pid, target: Pid) {
+        self.set_status(pid, Status::Waiting(target));
+        self.wait_queue[target].lock().push(pid);
+    }
+
+    pub fn exit(&self, pid: Pid, code: ExitCode) {
+        self.set_status(pid, Status::Exited(code));
+        for waiter in self.wait_queue[pid].lock().drain(..) {
+            self.wakeup(waiter);
+        }
+    }
+}
diff --git a/crate/process/src/processor.rs b/crate/process/src/processor.rs
index a2358b7..c1432ca 100644
--- a/crate/process/src/processor.rs
+++ b/crate/process/src/processor.rs
@@ -1,396 +1,95 @@
-use alloc::{boxed::Box, collections::BTreeMap};
-use scheduler::*;
-use event_hub::EventHub;
-use util::GetMut2;
-use core::fmt::Debug;
-
-#[derive(Debug)]
-pub struct Process {
-    pid: Pid,
-    parent: Pid,
-    status: Status,
-    context: T,
-}
-
-pub type Pid = usize;
-pub type ErrorCode = usize;
-
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub enum Status {
-    Ready,
-    Running,
-    Waiting(Pid),
-    Sleeping,
-    Exited(ErrorCode),
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use spin::Mutex;
+use core::cell::UnsafeCell;
+use process_manager::*;
+
+/// Process executor
+///
+/// A per-CPU struct, defined as a global.
+/// Only accessed by its associated CPU, with interrupts disabled.
+#[derive(Default)]
+pub struct Processor {
+    inner: UnsafeCell<Option<ProcessorInner>>,
 }
 
-pub trait Context: Debug {
-    unsafe fn switch(&mut self, target: &mut Self);
-    fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Self;
-}
+unsafe impl Sync for Processor {}
 
-pub struct Processor_ {
-    procs: BTreeMap>,
-    current_pid: Pid,
-    event_hub: EventHub,
-    /// Choose what on next schedule ?
- next: Option, - // WARNING: if MAX_PROCESS_NUM is too large, will cause stack overflow - scheduler: S, +struct ProcessorInner { + id: usize, + proc: Option<(Pid, Box)>, + loop_context: Box, + manager: Arc, } -impl Process { - fn exit_code(&self) -> Option { - match self.status { - Status::Exited(code) => Some(code), - _ => None, - } - } -} - -// TODO: 除schedule()外的其它函数,应该只设置进程状态,不应调用schedule -impl Processor_ { - - /* - ** @brief create a new Processor - ** @param init_context: T initiate context - ** scheduler: S the scheduler to use - ** @retval the Processor created - */ - pub fn new(init_context: T, scheduler: S) -> Self { - let init_proc = Process { - pid: 0, - parent: 0, - status: Status::Running, - context: init_context, - }; - Processor_ { - procs: { - let mut map = BTreeMap::>::new(); - map.insert(0, init_proc); - map - }, - current_pid: 0, - event_hub: EventHub::new(), - next: None, - scheduler, - } - } - - /* - ** @brief set the priority of current process - ** @param priority: u8 the priority to set - ** @retval none - */ - pub fn set_priority(&mut self, priority: u8) { - self.scheduler.set_priority(self.current_pid, priority); - } - - /* - ** @brief mark the current process to reschedule - ** @param none - ** @retval none - */ - pub fn set_reschedule(&mut self) { - let pid = self.current_pid; - self.set_status(pid, Status::Ready); - } - - /* - ** @brief allocate the pid of the process - ** @param none - ** @retval the pid allocated - */ - fn alloc_pid(&self) -> Pid { - let mut next: Pid = 0; - for &i in self.procs.keys() { - if i != next { - return next; - } else { - next = i + 1; - } - } - return next; - } - - /* - ** @brief set the status of the process - ** @param pid: Pid the pid of process which needs to be set - ** status: Status the status to be set - ** @retval none - */ - fn set_status(&mut self, pid: Pid, status: Status) { - let status0 = self.get(pid).status.clone(); - match (&status0, &status) { - (&Status::Ready, &Status::Ready) => return, - (&Status::Ready, _) => self.scheduler.remove(pid), - (_, &Status::Ready) => self.scheduler.insert(pid), - _ => {} - } - trace!("process {} {:?} -> {:?}", pid, status0, status); - self.get_mut(pid).status = status; - } - - /* - ** @brief Called by timer. - ** Handle events. 
- ** @param none - ** @retval none - */ - pub fn tick(&mut self) { - let current_pid = self.current_pid; - if self.scheduler.tick(current_pid) { - self.set_reschedule(); - } - self.event_hub.tick(); - while let Some(event) = self.event_hub.pop() { - debug!("event {:?}", event); - match event { - Event::Schedule => { - self.event_hub.push(10, Event::Schedule); - self.set_reschedule(); - }, - Event::Wakeup(pid) => { - self.set_status(pid, Status::Ready); - self.set_reschedule(); - self.next = Some(pid); - }, - } - } - } - - /* - ** @brief get now time - ** @param none - ** @retval the time got - */ - pub fn get_time(&self) -> usize { - self.event_hub.get_time() +impl Processor { + pub const fn new() -> Self { + Processor { inner: UnsafeCell::new(None) } } - /* - ** @brief add a new process - ** @param context: T the context fo the process - ** @retval the pid of new process - */ - pub fn add(&mut self, context: T) -> Pid { - let pid = self.alloc_pid(); - let process = Process { - pid, - parent: self.current_pid, - status: Status::Ready, - context, - }; - self.scheduler.insert(pid); - self.procs.insert(pid, process); - pid - } - - /* - ** @brief Called every interrupt end - ** Do schedule ONLY IF current status != Running - ** @param none - ** @retval none - */ - pub fn schedule(&mut self) { - if self.get(self.current_pid).status == Status::Running { - return; + pub unsafe fn init(&self, id: usize, context: Box, manager: Arc) { + unsafe { + *self.inner.get() = Some(ProcessorInner { + id, + proc: None, + loop_context: context, + manager, + }); } - let pid = self.next.take().unwrap_or_else(|| self.scheduler.select().unwrap()); - self.switch_to(pid); } - /* - ** @brief Switch process to `pid`, switch page table if necessary. - Store `rsp` and point it to target kernel stack. - The current status must be set before, and not be `Running`. - ** @param the pid of the process to switch - ** @retval none - */ - fn switch_to(&mut self, pid: Pid) { - // for debug print - let pid0 = self.current_pid; - - if pid == self.current_pid { - if self.get(self.current_pid).status != Status::Running { - self.set_status(pid, Status::Running); + fn inner(&self) -> &mut ProcessorInner { + unsafe { &mut *self.inner.get() }.as_mut() + .expect("Processor is not initialized") + } + + /// Begin running processes after CPU setup. + /// + /// This function never returns. It loops, doing: + /// - choose a process to run + /// - switch to start running that process + /// - eventually that process transfers control + /// via switch back to the scheduler. + pub fn run(&self) -> ! 
{ + let inner = self.inner(); + loop { + let proc = inner.manager.run(inner.id); + trace!("CPU{} begin running process {}", inner.id, proc.0); + inner.proc = Some(proc); + unsafe { + inner.loop_context.switch_to(&mut *inner.proc.as_mut().unwrap().1); } - return; + let (pid, context) = inner.proc.take().unwrap(); + trace!("CPU{} stop running process {}", inner.id, pid); + inner.manager.stop(pid, context); } - self.current_pid = pid; - - let (from, to) = self.procs.get_mut2(pid0, pid); - - assert_ne!(from.status, Status::Running); - assert_eq!(to.status, Status::Ready); - to.status = Status::Running; - self.scheduler.remove(pid); - - //info!("switch from {} to {} {:x?}", pid0, pid, to.context); - unsafe { from.context.switch(&mut to.context); } - } - - /* - ** @brief get process by pid - ** @param pid: Pid the pid of the process - ** @retval the process struct - */ - fn get(&self, pid: Pid) -> &Process { - self.procs.get(&pid).unwrap() } - /* - ** @brief get mut process struct by pid - ** @param pid: Pid the pid of the process - ** @retval the mut process struct - */ - fn get_mut(&mut self, pid: Pid) -> &mut Process { - self.procs.get_mut(&pid).unwrap() - } - - /* - ** @brief get context of current process - ** @param none - ** @retval current context - */ - pub fn current_context(&self) -> &T { - &self.get(self.current_pid).context - } - - pub fn current_context_mut(&mut self) -> &mut T { - let id = self.current_pid; - &mut self.get_mut(id).context - } - - /* - ** @brief get pid of current process - ** @param none - ** @retval current pid - */ - pub fn current_pid(&self) -> Pid { - self.current_pid - } - - /* - ** @brief kill a process by pid - ** @param pid: Pid the pid of the process to kill - ** @retval none - */ - pub fn kill(&mut self, pid: Pid) { - self.exit(pid, 0x1000); // TODO: error code for killed - } - - /* - ** @brief exit a process by pid - ** @param pid: Pid the pid to exit - ** error_code: ErrorCode the error code when exiting - ** @retval none - */ - pub fn exit(&mut self, pid: Pid, error_code: ErrorCode) { - info!("{} exit, code: {}", pid, error_code); - self.set_status(pid, Status::Exited(error_code)); - if let Some(waiter) = self.find_waiter(pid) { - info!(" then wakeup {}", waiter); - self.set_status(waiter, Status::Ready); - self.next = Some(waiter); + /// Called by process running on this Processor. + /// Yield and reschedule. 
+ pub fn yield_now(&self) { + let inner = self.inner(); + unsafe { + inner.proc.as_mut().unwrap().1.switch_to(&mut *inner.loop_context); } } - /* - ** @brief let a process to sleep for a while - ** @param pid: Pid the pid of the process to sleep - ** time: usize the time to sleep - ** @retval none - */ - pub fn sleep(&mut self, pid: Pid, time: usize) { - self.set_status(pid, Status::Sleeping); - self.event_hub.push(time, Event::Wakeup(pid)); + pub fn pid(&self) -> Pid { + self.inner().proc.as_ref().unwrap().0 } - /* - ** @brief let a process to sleep until wake up - ** @param pid: Pid the pid of the process to sleep - ** @retval none - */ - pub fn sleep_(&mut self, pid: Pid) { - self.set_status(pid, Status::Sleeping); + pub fn context(&self) -> &Context { + &*self.inner().proc.as_ref().unwrap().1 } - /* - ** @brief wake up al sleeping process - ** @param pid: Pid the pid of the process to wake up - ** @retval none - */ - pub fn wakeup_(&mut self, pid: Pid) { - self.set_status(pid, Status::Ready); + pub fn manager(&self) -> &ProcessManager { + &*self.inner().manager } - /* - ** @brief Let current process wait for another - ** @param pid: Pid the pid of the process to wait for - ** @retval the result of wait - */ - pub fn current_wait_for(&mut self, pid: Pid) -> WaitResult { - info!("current {} wait for {:?}", self.current_pid, pid); - if self.procs.values().filter(|&p| p.parent == self.current_pid).next().is_none() { - return WaitResult::NotExist; + pub fn tick(&self) { + let need_reschedule = self.manager().tick(self.pid()); + if need_reschedule { + self.yield_now(); } - let pid = self.try_wait(pid).unwrap_or_else(|| { - let current_pid = self.current_pid; - self.set_status(current_pid, Status::Waiting(pid)); - self.schedule(); // yield - self.try_wait(pid).unwrap() - }); - let exit_code = self.get(pid).exit_code().unwrap(); - info!("{} wait end and remove {}", self.current_pid, pid); - self.procs.remove(&pid); - WaitResult::Ok(pid, exit_code) - } - - /* - ** @brief Try to find a exited wait target - ** @param pid: Pid the pid of the process to wait for - ** @retval the pid found or none - */ - fn try_wait(&mut self, pid: Pid) -> Option { - match pid { - 0 => self.procs.values() - .find(|&p| p.parent == self.current_pid && p.exit_code().is_some()) - .map(|p| p.pid), - _ => self.get(pid).exit_code().map(|_| pid), - } - } - - /* - ** @brief find one process which is waiting for the input process - ** @param pid: Pid the pid of the target process - ** @retval the pid of the waiting process or none - */ - fn find_waiter(&self, pid: Pid) -> Option { - self.procs.values().find(|&p| { - p.status == Status::Waiting(pid) || - (p.status == Status::Waiting(0) && self.get(pid).parent == p.pid) - }).map(|ref p| p.pid) } } - -#[derive(Debug)] -pub enum WaitResult { - /// The target process is exited with `ErrorCode`. - Ok(Pid, ErrorCode), - /// The target process is not exist. 
- NotExist, -} - -#[derive(Debug)] -enum Event { - Schedule, - Wakeup(Pid), -} - -impl GetMut2 for BTreeMap> { - type Output = Process; - fn get_mut(&mut self, id: Pid) -> &mut Process { - self.get_mut(&id).unwrap() - } -} \ No newline at end of file diff --git a/crate/process/src/scheduler.rs b/crate/process/src/scheduler.rs index cfc42a4..9ab52fa 100644 --- a/crate/process/src/scheduler.rs +++ b/crate/process/src/scheduler.rs @@ -2,51 +2,19 @@ use alloc::{collections::BinaryHeap, vec::Vec}; type Pid = usize; - -// implements of process scheduler +/// pub trait Scheduler { - - /* - ** @brief add a new process - ** @param pid: Pid the pid of the process to add - ** @retval none - */ fn insert(&mut self, pid: Pid); - - /* - ** @brief remove a processs from the list - ** @param pid: Pid the pid of the process to remove - ** @retval none - */ fn remove(&mut self, pid: Pid); - - /* - ** @brief choose a process to run next - ** @param none - ** @retval Option the pid of the process to run or none - */ fn select(&mut self) -> Option; - - /* - ** @brief when a clock interrupt occurs, update the list and check whether need to reschedule - ** @param current: Pid the pid of the process which is running now - ** @retval bool if need to reschedule - */ fn tick(&mut self, current: Pid) -> bool; // need reschedule? - - /* - ** @brief set the priority of the process - ** @param pid: Pid the pid of the process to be set - ** priority: u8 the priority to be set - ** @retval none - */ fn set_priority(&mut self, pid: Pid, priority: u8); + fn move_to_head(&mut self, pid: Pid); } pub use self::rr::RRScheduler; pub use self::stride::StrideScheduler; -// use round-robin scheduling mod rr { use super::*; @@ -112,6 +80,14 @@ mod rr { fn set_priority(&mut self, pid: usize, priority: u8) { } + + fn move_to_head(&mut self, pid: usize) { + let pid = pid + 1; + assert!(self.infos[pid].present); + self._list_remove(pid); + self._list_add_after(pid, 0); + trace!("rr move_to_head {}", pid - 1); + } } impl RRScheduler { @@ -128,6 +104,10 @@ mod rr { self.infos[prev].next = i; self.infos[at].prev = i; } + fn _list_add_after(&mut self, i: Pid, at: Pid) { + let next = self.infos[at].next; + self._list_add_before(i, next); + } fn _list_remove(&mut self, i: Pid) { let next = self.infos[i].next; let prev = self.infos[i].prev; @@ -139,7 +119,6 @@ mod rr { } } -// use stride scheduling mod stride { use super::*; @@ -190,13 +169,17 @@ mod stride { let info = &mut self.infos[pid]; assert!(info.present); info.present = false; - // BinaryHeap only support pop the top. - // So in order to remove an arbitrary element, - // we have to take all elements into a Vec, - // then push the rest back. - let rest: Vec<_> = self.queue.drain().filter(|&p| p.1 != pid).collect(); - use core::iter::FromIterator; - self.queue = BinaryHeap::from_iter(rest.into_iter()); + if self.queue.peek().is_some() && self.queue.peek().unwrap().1 == pid { + self.queue.pop(); + } else { + // BinaryHeap only support pop the top. + // So in order to remove an arbitrary element, + // we have to take all elements into a Vec, + // then push the rest back. 
+ let rest: Vec<_> = self.queue.drain().filter(|&p| p.1 != pid).collect(); + use core::iter::FromIterator; + self.queue = BinaryHeap::from_iter(rest.into_iter()); + } trace!("stride remove {}", pid); } @@ -229,6 +212,15 @@ mod stride { self.infos[pid].priority = priority; trace!("stride {} priority = {}", pid, priority); } + + fn move_to_head(&mut self, pid: Pid) { + if self.queue.peek().is_some() { + let stride = -self.queue.peek().unwrap().0; + self.remove(pid); + self.infos[pid].stride = stride; + self.insert(pid); + } + } } impl StrideScheduler { diff --git a/crate/process/src/thread.rs b/crate/process/src/thread.rs index 3e45c58..1c8b1ad 100644 --- a/crate/process/src/thread.rs +++ b/crate/process/src/thread.rs @@ -1,199 +1,165 @@ //! Thread std-like interface //! -//! Based on Processor. -//! Used in the kernel. +//! Based on Processor. Used in kernel. //! -//! # Example -//! -//! ``` -//! // Define a support implementation struct -//! pub struct ThreadSupportImpl; -//! -//! // Impl `ThreadSupport` trait -//! impl ThreadSupport for ThreadSupportImpl { ... } -//! -//! // Export the full struct as `thread`. -//! #[allow(non_camel_case_types)] -//! pub type thread = ThreadMod; -//! ``` -//! -//! ``` -//! // Use it just like `std::thread` -//! use thread; -//! let t = thread::current(); -//! -//! // But the other struct is not available ... -//! let t: thread::Thread; // ERROR! -//! ``` +//! You need to implement the following functions before use: +//! - `processor`: Get a reference of the current `Processor` +//! - `new_kernel_context`: Construct a `Context` of the new kernel thread use alloc::boxed::Box; use alloc::collections::BTreeMap; -use core::any::Any; use core::marker::PhantomData; use core::ptr; use core::time::Duration; -use core::ops::DerefMut; use processor::*; +use process_manager::*; use scheduler::Scheduler; -/// All dependencies for thread mod. -pub trait ThreadSupport { - type Context: Context; - type Scheduler: Scheduler; - type ProcessorGuard: DerefMut>; - fn processor() -> Self::ProcessorGuard; +#[linkage = "weak"] +#[no_mangle] +/// Get a reference of the current `Processor` +fn processor() -> &'static Processor { + unimplemented!("thread: Please implement and export `processor`") } -/// Root structure served as thread mod -pub struct ThreadMod { - mark: PhantomData +#[linkage = "weak"] +#[no_mangle] +/// Construct a `Context` of the new kernel thread +fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box { + unimplemented!("thread: Please implement and export `new_kernel_context`") } -impl ThreadMod { - /* - ** @brief Gets a handle to the thread that invokes it. - ** @param none - ** @retval the thread to get - */ - pub fn current() -> Thread { - Thread { - pid: S::processor().current_pid(), - mark: PhantomData, - } + +/// Gets a handle to the thread that invokes it. +pub fn current() -> Thread { + Thread { + pid: processor().pid(), } +} - /* - ** @brief Puts the current thread to sleep for the specified amount of time. - ** @param dur: Duration the time to sleep - ** @retval none - */ - pub fn sleep(dur: Duration) { - let time = dur_to_ticks(dur); - info!("sleep: {:?} ticks", time); - let mut processor = S::processor(); - let pid = processor.current_pid(); - processor.sleep(pid, time); - processor.schedule(); +/// Puts the current thread to sleep for the specified amount of time. 
+pub fn sleep(dur: Duration) {
+    let time = dur_to_ticks(dur);
+    info!("sleep: {:?} ticks", time);
+    processor().manager().sleep(current().id(), time);
+    park();
-        fn dur_to_ticks(dur: Duration) -> usize {
-            return dur.as_secs() as usize * 100 + dur.subsec_nanos() as usize / 10_000_000;
-        }
+    fn dur_to_ticks(dur: Duration) -> usize {
+        return dur.as_secs() as usize * 100 + dur.subsec_nanos() as usize / 10_000_000;
     }
+}
+
+/// Spawns a new thread, returning a JoinHandle for it.
+///
+/// `F`: Type of the function `f`
+/// `T`: Type of the return value of `f`
+pub fn spawn<F, T>(f: F) -> JoinHandle<T>
+    where
+        F: Send + 'static + FnOnce() -> T,
+        T: Send + 'static,
+{
+    info!("spawn:");
 
-    /*
-    ** @brief  Spawns a new thread, returning a JoinHandle for it.
-    ** @param  f: F            the thread to start
-    ** @retval JoinHandle      the JoinHandle of the new thread
-    */
-    pub fn spawn(f: F) -> JoinHandle
+    // Note the problem here:
+    // the Processor can only create a new thread from an entry address `entry` plus an argument `arg`,
+    // but we need it to run a (closure) function `f` of unknown type.
+
+    // First, move the function itself (its code and captured data) onto the heap.
+    let f = Box::into_raw(Box::new(f));
+
+    // Define a static function as the entry point of the new thread.
+    // Its argument is the pointer to `f` on the heap,
+    // which is how we smuggle `f` into a static function.
+    //
+    // Note that the entry point is generic, so every call to `spawn`
+    // instantiates a fresh `kernel_thread_entry`, because the type `F` is unique.
+    extern fn kernel_thread_entry<F, T>(f: usize) -> !
         where
             F: Send + 'static + FnOnce() -> T,
             T: Send + 'static,
     {
-        info!("spawn:");
-        let f = Box::into_raw(Box::new(f));
-        let pid = S::processor().add(Context::new_kernel(kernel_thread_entry::, f as usize));
-        return JoinHandle {
-            thread: Thread { pid, mark: PhantomData },
-            mark: PhantomData,
-        };
-
-        extern fn kernel_thread_entry(f: usize) -> !
-            where
-                S: ThreadSupport,
-                F: Send + 'static + FnOnce() -> T,
-                T: Send + 'static,
-        {
-            let f = unsafe { Box::from_raw(f as *mut F) };
-            let ret = Box::new(f());
-//            unsafe { LocalKey::::get_map() }.clear();
-            let mut processor = S::processor();
-            let pid = processor.current_pid();
-            processor.exit(pid, Box::into_raw(ret) as usize);
-            processor.schedule();
-            unreachable!()
-        }
+        // Inside the static function:
+        // recover `f` from the pointer that was passed in.
+        let f = unsafe { Box::from_raw(f as *mut F) };
+        // Call `f`, and put its return value on the heap as well.
+        let ret = Box::new(f());
+        // Clean up thread-local storage.
+        // unsafe { LocalKey::::get_map() }.clear();
+        // Ask the Processor to exit the current thread,
+        // passing the heap pointer to `f`'s return value out as the thread's exit code.
+        let exit_code = Box::into_raw(ret) as usize;
+        processor().manager().exit(current().id(), exit_code);
+        processor().yield_now();
+        // This thread will never be scheduled again.
+        unreachable!()
    }
 
-    /*
-    ** @brief  Cooperatively gives up a timeslice to the OS scheduler.
-    ** @param  none
-    ** @retval none
-    */
-    pub fn yield_now() {
-        info!("yield:");
-        let mut processor = S::processor();
-        processor.set_reschedule();
-        processor.schedule();
-    }
+    // Create the new thread in the Processor.
+    let context = new_kernel_context(kernel_thread_entry::<F, T>, f as usize);
+    let pid = processor().manager().add(context);
 
-    /*
-    ** @brief  Blocks unless or until the current thread's token is made available.
-    ** @param  none
-    ** @retval none
-    */
-    pub fn park() {
-        info!("park:");
-        let mut processor = S::processor();
-        let pid = processor.current_pid();
-        processor.sleep_(pid);
-        processor.schedule();
-    }
+    // Next, see the implementation of `JoinHandle::join()`
+    // to learn how the return value of `f` is retrieved.
+    return JoinHandle {
+        thread: Thread { pid },
+        mark: PhantomData,
+    };
+}
+
+/// Cooperatively gives up a timeslice to the OS scheduler.
+pub fn yield_now() {
+    info!("yield:");
+    processor().yield_now();
+}
+
+/// Blocks unless or until the current thread's token is made available.
+pub fn park() {
+    info!("park:");
+    processor().manager().sleep(current().id(), 0);
+    processor().yield_now();
 }
 
 /// A handle to a thread.
-pub struct Thread { +pub struct Thread { pid: usize, - mark: PhantomData, } -impl Thread { - /* - ** @brief Atomically makes the handle's token available if it is not already. - ** @param none - ** @retval none - */ +impl Thread { + /// Atomically makes the handle's token available if it is not already. pub fn unpark(&self) { - let mut processor = S::processor(); - processor.wakeup_(self.pid); + processor().manager().wakeup(self.pid); } - /* - ** @brief Gets the thread's unique identifier. - ** @param none - ** @retval usize the the thread's unique identifier - */ + /// Gets the thread's unique identifier. pub fn id(&self) -> usize { self.pid } } /// An owned permission to join on a thread (block on its termination). -pub struct JoinHandle { - thread: Thread, +pub struct JoinHandle { + thread: Thread, mark: PhantomData, } -impl JoinHandle { - /* - ** @brief Extracts a handle to the underlying thread. - ** @param none - ** @retval the thread of the handle - */ - pub fn thread(&self) -> &Thread { +impl JoinHandle { + /// Extracts a handle to the underlying thread. + pub fn thread(&self) -> &Thread { &self.thread } - /* - ** @brief Waits for the associated thread to finish. - ** @param none - ** @retval Result the result of the associated thread - */ + /// Waits for the associated thread to finish. pub fn join(self) -> Result { - let mut processor = S::processor(); - match processor.current_wait_for(self.thread.pid) { - WaitResult::Ok(_, exit_code) => unsafe { - Ok(*Box::from_raw(exit_code as *mut T)) + loop { + match processor().manager().get_status(self.thread.pid) { + Some(Status::Exited(exit_code)) => { + processor().manager().remove(self.thread.pid); + // Find return value on the heap from the exit code. + return Ok(unsafe { *Box::from_raw(exit_code as *mut T) }); + } + None => return Err(()), + _ => {} } - WaitResult::NotExist => Err(()), + processor().manager().wait(current().id(), self.thread.pid); + processor().yield_now(); } } } diff --git a/crate/riscv b/crate/riscv index a37a65f..f358204 160000 --- a/crate/riscv +++ b/crate/riscv @@ -1 +1 @@ -Subproject commit a37a65fa13a00c5aa0068c3f2b5d55af6a37dd93 +Subproject commit f358204af01f2374ab6ed6ea059f724cd5f2fe6f diff --git a/kernel/Cargo.lock b/kernel/Cargo.lock index 6c43a1a..d798497 100644 --- a/kernel/Cargo.lock +++ b/kernel/Cargo.lock @@ -244,6 +244,7 @@ name = "ucore-process" version = "0.1.0" dependencies = [ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "spin 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/kernel/src/arch/riscv32/boot/entry.asm b/kernel/src/arch/riscv32/boot/entry.asm index 8bb9268..0485f0e 100644 --- a/kernel/src/arch/riscv32/boot/entry.asm +++ b/kernel/src/arch/riscv32/boot/entry.asm @@ -1,14 +1,19 @@ .section .text.entry .globl _start _start: - lui sp, %hi(bootstacktop) - addi sp, sp, %lo(bootstacktop) + add t0, a0, 1 + slli t0, t0, 16 + + lui sp, %hi(bootstack) + addi sp, sp, %lo(bootstack) + add sp, sp, t0 + call rust_main .section .bss .align 12 #PGSHIFT .global bootstack bootstack: - .space 4096 * 16 #KSTACKSIZE + .space 4096 * 16 * 8 .global bootstacktop bootstacktop: diff --git a/kernel/src/arch/riscv32/boot/trap.asm b/kernel/src/arch/riscv32/boot/trap.asm index d400826..bf4aba5 100644 --- a/kernel/src/arch/riscv32/boot/trap.asm +++ b/kernel/src/arch/riscv32/boot/trap.asm @@ -13,7 +13,8 @@ _save_context: # save x registers except x2 (sp) sw x1, 1*4(sp) sw x3, 3*4(sp) - sw x4, 4*4(sp) + # tp(x4) = hartid. DON'T change. 
+ # sw x4, 4*4(sp) sw x5, 5*4(sp) sw x6, 6*4(sp) sw x7, 7*4(sp) @@ -61,7 +62,7 @@ _save_context: lw s1, 32*4(sp) # s1 = sstatus lw s2, 33*4(sp) # s2 = sepc andi s0, s1, 1 << 8 - bnez s0, _restore_context # back to U-mode? (sstatus.SPP = 1) + bnez s0, _restore_context # back to S-mode? (sstatus.SPP = 1) _save_kernel_sp: addi s0, sp, 36*4 csrw 0x140, s0 # sscratch = kernel-sp @@ -73,7 +74,7 @@ _restore_context: # restore x registers except x2 (sp) lw x1, 1*4(sp) lw x3, 3*4(sp) - lw x4, 4*4(sp) + # lw x4, 4*4(sp) lw x5, 5*4(sp) lw x6, 6*4(sp) lw x7, 7*4(sp) diff --git a/kernel/src/arch/riscv32/consts.rs b/kernel/src/arch/riscv32/consts.rs new file mode 100644 index 0000000..d60b6e2 --- /dev/null +++ b/kernel/src/arch/riscv32/consts.rs @@ -0,0 +1,14 @@ +// Physical address available on THINPAD: +// [0x80000000, 0x80800000] +const P2_SIZE: usize = 1 << 22; +const P2_MASK: usize = 0x3ff << 22; +pub const RECURSIVE_INDEX: usize = 0x3fe; +pub const KERNEL_OFFSET: usize = 0; +pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22; +pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000; +pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000; +pub const MEMORY_OFFSET: usize = 0x8000_0000; +pub const MEMORY_END: usize = 0x8080_0000; +pub const USER_STACK_OFFSET: usize = 0x70000000; +pub const USER_STACK_SIZE: usize = 0x10000; +pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET; \ No newline at end of file diff --git a/kernel/src/arch/riscv32/context.rs b/kernel/src/arch/riscv32/context.rs index 0dc73e8..1486e9f 100644 --- a/kernel/src/arch/riscv32/context.rs +++ b/kernel/src/arch/riscv32/context.rs @@ -1,6 +1,6 @@ use super::super::riscv::register::*; -#[derive(Debug, Clone)] +#[derive(Clone)] #[repr(C)] pub struct TrapFrame { pub x: [usize; 32], // general registers @@ -53,9 +53,8 @@ impl TrapFrame { tf.x[2] = sp; tf.sepc = entry_addr; tf.sstatus = sstatus::read(); - // Supervisor Previous Interrupt Disable ? 
- tf.sstatus.set_spie(false); // Enable interrupt - // Supervisor Previous Privilege Mode is User + tf.sstatus.set_spie(true); + tf.sstatus.set_sie(false); tf.sstatus.set_spp(sstatus::SPP::User); tf } @@ -65,6 +64,29 @@ impl TrapFrame { } } +use core::fmt::{Debug, Formatter, Error}; +impl Debug for TrapFrame { + fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { + struct Regs<'a>(&'a [usize; 32]); + impl<'a> Debug for Regs<'a> { + fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { + const REG_NAME: [&str; 32] = [ + "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2", + "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", + "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", + "t3", "t4", "t5", "t6"]; + f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish() + } + } + f.debug_struct("TrapFrame") + .field("regs", &Regs(&self.x)) + .field("sstatus", &self.sstatus) + .field("sepc", &self.sepc) + .field("sbadaddr", &self.sbadaddr) + .field("scause", &self.scause) + .finish() + } +} /// kernel stack contents for a new thread #[derive(Debug)] #[repr(C)] diff --git a/kernel/src/arch/riscv32/cpu.rs b/kernel/src/arch/riscv32/cpu.rs new file mode 100644 index 0000000..fedd853 --- /dev/null +++ b/kernel/src/arch/riscv32/cpu.rs @@ -0,0 +1,36 @@ +use consts::MAX_CPU_NUM; +use core::ptr::{read_volatile, write_volatile}; +use memory::*; + +static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM]; + +pub unsafe fn set_cpu_id(cpu_id: usize) { + asm!("mv tp, $0" : : "r"(cpu_id)); +} + +pub fn id() -> usize { + let cpu_id; + unsafe { asm!("mv $0, tp" : "=r"(cpu_id)); } + cpu_id +} + +pub fn send_ipi(cpu_id: usize) { + super::bbl::sbi::send_ipi(1 << cpu_id); +} + +pub unsafe fn has_started(cpu_id: usize) -> bool { + read_volatile(&STARTED[cpu_id]) +} + +pub unsafe fn start_others(hart_mask: usize) { + for cpu_id in 0..MAX_CPU_NUM { + if (hart_mask >> cpu_id) & 1 != 0 { + write_volatile(&mut STARTED[cpu_id], true); + } + } +} + +pub fn halt() { + use super::riscv::asm::wfi; + unsafe { wfi() } +} \ No newline at end of file diff --git a/kernel/src/arch/riscv32/interrupt.rs b/kernel/src/arch/riscv32/interrupt.rs index 7c781e1..21f96d3 100644 --- a/kernel/src/arch/riscv32/interrupt.rs +++ b/kernel/src/arch/riscv32/interrupt.rs @@ -18,6 +18,8 @@ pub fn init() { sscratch::write(0); // Set the exception vector address stvec::write(__alltraps as usize, stvec::TrapMode::Direct); + // Enable IPI + sie::set_ssoft(); } info!("interrupt: init end"); } @@ -77,10 +79,13 @@ pub extern fn rust_trap(tf: &mut TrapFrame) { Trap::Exception(E::InstructionPageFault) => page_fault(tf), _ => ::trap::error(tf), } - ::trap::before_return(); trace!("Interrupt end"); } +fn ipi() { + debug!("IPI"); + super::bbl::sbi::clear_ipi(); +} /* * @brief: * process timer interrupt diff --git a/kernel/src/arch/riscv32/mod.rs b/kernel/src/arch/riscv32/mod.rs index 140c712..de43a25 100644 --- a/kernel/src/arch/riscv32/mod.rs +++ b/kernel/src/arch/riscv32/mod.rs @@ -7,19 +7,35 @@ pub mod timer; pub mod paging; pub mod memory; pub mod compiler_rt; +pub mod consts; +pub mod cpu; #[no_mangle] -pub extern fn rust_main() -> ! { - println!("Hello RISCV! {}", 123); - // First init log mod, so that we can print log info. +pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! { + unsafe { cpu::set_cpu_id(hartid); } + println!("Hello RISCV! 
in hart {}, {}, {}", hartid, dtb, hart_mask); + + if hartid != 0 { + while unsafe { !cpu::has_started(hartid) } { } + others_main(); + unreachable!(); + } + ::logging::init(); - // Init interrupt handling. interrupt::init(); - // Init physical memory management and heap memory::init(); - // Init timer interrupt timer::init(); - + + ::process::init(); + ::thread::spawn(::fs::shell); + + unsafe { cpu::start_others(hart_mask); } + ::kmain(); +} + +fn others_main() -> ! { + interrupt::init(); + timer::init(); ::kmain(); } diff --git a/kernel/src/arch/riscv32/paging.rs b/kernel/src/arch/riscv32/paging.rs index d9d525c..de81266 100644 --- a/kernel/src/arch/riscv32/paging.rs +++ b/kernel/src/arch/riscv32/paging.rs @@ -1,4 +1,4 @@ -use consts::{KERNEL_PML4, RECURSIVE_PAGE_PML4}; +use consts::{KERNEL_P2_INDEX, RECURSIVE_INDEX}; // Depends on kernel use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame}; use super::riscv::addr::*; @@ -20,14 +20,14 @@ use ucore_memory::paging::*; pub fn setup_page_table(frame: Frame) { let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut RvPageTable) }; p2.zero(); - p2.set_recursive(RECURSIVE_PAGE_PML4, frame.clone()); + p2.set_recursive(RECURSIVE_INDEX, frame.clone()); // Set kernel identity map // 0x10000000 ~ 1K area p2.map_identity(0x40, EF::VALID | EF::READABLE | EF::WRITABLE); // 0x80000000 ~ 8K area - p2.map_identity(KERNEL_PML4, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE); - p2.map_identity(KERNEL_PML4 + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE); + p2.map_identity(KERNEL_P2_INDEX, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE); + p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE); use super::riscv::register::satp; unsafe { satp::set(satp::Mode::Sv32, 0, frame); } @@ -88,7 +88,7 @@ impl PageTable for ActivePageTable { let page = Page::of_addr(VirtAddr::new(addr)); // ??? let _ = self.0.translate_page(page); - let entry_addr = ((addr >> 10) & 0x003ffffc) | (RECURSIVE_PAGE_PML4 << 22); + let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22); unsafe { &mut *(entry_addr as *mut PageEntry) } } @@ -130,7 +130,7 @@ impl PageTable for ActivePageTable { // define the ROOT_PAGE_TABLE, and the virtual address of it? 
const ROOT_PAGE_TABLE: *mut RvPageTable = - (((RECURSIVE_PAGE_PML4 << 10) | (RECURSIVE_PAGE_PML4 + 1)) << 12) as *mut RvPageTable; + (((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable; impl ActivePageTable { pub unsafe fn new() -> Self { @@ -185,7 +185,6 @@ impl Entry for PageEntry { flags.set(EF::RESERVED2, !writable); } fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); } - // valid property must be 0 used when swapped fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) } fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::RESERVED1, value); } fn user(&self) -> bool { self.0.flags().contains(EF::USER) } @@ -231,7 +230,7 @@ impl InactivePageTable for InactivePageTable0 { .expect("failed to allocate frame"); active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| { table.zero(); - table.set_recursive(RECURSIVE_PAGE_PML4, frame.clone()); + table.set_recursive(RECURSIVE_INDEX, frame.clone()); }); InactivePageTable0 { p2_frame: frame } } @@ -244,17 +243,17 @@ impl InactivePageTable for InactivePageTable0 { */ fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) { active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| { - let backup = p2_table[RECURSIVE_PAGE_PML4].clone(); + let backup = p2_table[RECURSIVE_INDEX].clone(); // overwrite recursive mapping - p2_table[RECURSIVE_PAGE_PML4].set(self.p2_frame.clone(), EF::VALID); + p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID); sfence_vma_all(); // execute f in the new context f(active_table); - // restore recursive mapping to original p4 table - p2_table[RECURSIVE_PAGE_PML4] = backup; + // restore recursive mapping to original p2 table + p2_table[RECURSIVE_INDEX] = backup; sfence_vma_all(); }); } @@ -351,12 +350,12 @@ impl InactivePageTable0 { fn map_kernel(&mut self) { let table = unsafe { &mut *ROOT_PAGE_TABLE }; let e0 = table[0x40]; - let e1 = table[KERNEL_PML4]; + let e1 = table[KERNEL_P2_INDEX]; assert!(!e1.is_unused()); self.edit(|_| { table[0x40] = e0; - table[KERNEL_PML4].set(e1.frame(), EF::VALID | EF::GLOBAL); + table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL); }); } } diff --git a/kernel/src/arch/x86_64/consts.rs b/kernel/src/arch/x86_64/consts.rs new file mode 100644 index 0000000..c2426c5 --- /dev/null +++ b/kernel/src/arch/x86_64/consts.rs @@ -0,0 +1,97 @@ +// Copy from Redox consts.rs: + +// Because the memory map is so important to not be aliased, it is defined here, in one place +// The lower 256 PML4 entries are reserved for userspace +// Each PML4 entry references up to 512 GB of memory +// The top (511) PML4 is reserved for recursive mapping +// The second from the top (510) PML4 is reserved for the kernel +/// The size of a single PML4 +pub const PML4_SIZE: usize = 0x0000_0080_0000_0000; +pub const PML4_MASK: usize = 0x0000_ff80_0000_0000; + +/// Offset of recursive paging +pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; +pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset of kernel +pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE; +pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE; + +pub const KERNEL_SIZE: usize = PML4_SIZE; + +/// Offset to kernel heap +pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE; +pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; +/// Size of kernel heap +pub const 
KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB + +pub const MEMORY_OFFSET: usize = 0; + +/// Offset to kernel percpu variables +//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; +pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000; +/// Size of kernel percpu variables +pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB + +/// Offset to user image +pub const USER_OFFSET: usize = 0; +pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user TCB +pub const USER_TCB_OFFSET: usize = 0xB000_0000; + +/// Offset to user arguments +pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2; + +/// Offset to user heap +pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE; +pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user grants +pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE; +pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user stack +pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE; +pub const USER32_STACK_OFFSET: usize = 0xB000_0000; +pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE; +/// Size of user stack +pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB + +/// Offset to user sigstack +pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE; +pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE; +/// Size of user sigstack +pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB + +/// Offset to user TLS +pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE; +pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary image (used when cloning) +pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE; +pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary heap (used when cloning) +pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE; +pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary page for grants +pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE; +pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary stack (used when cloning) +pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE; +pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary sigstack (used when cloning) +pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE; +pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset to user temporary tls (used when cloning) +pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE; +pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE; + +/// Offset for usage in other temporary pages +pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE; +pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE; \ No newline at end of file diff --git a/kernel/src/arch/x86_64/cpu.rs b/kernel/src/arch/x86_64/cpu.rs index 8e93cca..ae427e9 100644 --- a/kernel/src/arch/x86_64/cpu.rs +++ b/kernel/src/arch/x86_64/cpu.rs @@ -1,3 +1,6 @@ +use 
super::apic::{LocalApic, XApic}; +use super::raw_cpuid::CpuId; + /// Exit qemu /// See: https://wiki.osdev.org/Shutdown /// Must run qemu with `-device isa-debug-exit` @@ -7,4 +10,23 @@ pub unsafe fn exit_in_qemu(error_code: u8) -> ! { assert_eq!(error_code & 1, 1, "error code should be odd"); Port::new(0x501).write((error_code - 1) / 2); unreachable!() +} + +pub fn id() -> usize { + CpuId::new().get_feature_info().unwrap().initial_local_apic_id() as usize +} + +pub fn send_ipi(cpu_id: usize) { + let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) }; + unsafe { lapic.send_ipi(cpu_id as u8, 0x30); } // TODO: Find a IPI trap num +} + +pub fn init() { + let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) }; + lapic.cpu_init(); +} + +pub fn halt() { + use x86_64::instructions::hlt; + hlt(); } \ No newline at end of file diff --git a/kernel/src/arch/x86_64/driver/ide.rs b/kernel/src/arch/x86_64/driver/ide.rs index 475a95b..356978e 100644 --- a/kernel/src/arch/x86_64/driver/ide.rs +++ b/kernel/src/arch/x86_64/driver/ide.rs @@ -5,267 +5,120 @@ use spin::Mutex; lazy_static! { - pub static ref DISK0: LockedIde = LockedIde(Mutex::new(DmaController::new(0))); - pub static ref DISK1: LockedIde = LockedIde(Mutex::new(DmaController::new(1))); + pub static ref DISK0: LockedIde = LockedIde(Mutex::new(IDE::new(0))); + pub static ref DISK1: LockedIde = LockedIde(Mutex::new(IDE::new(1))); } pub const BLOCK_SIZE: usize = 512; -pub struct LockedIde(pub Mutex); +pub struct LockedIde(pub Mutex); -pub struct DmaController { +pub struct IDE { num: u8, + /// I/O Base + base: u16, + /// Control Base + ctrl: u16, } -impl DmaController -{ - /// Read ATA DMA. Block size = 512 bytes. - pub fn read(&self, blockidx: u64, count: usize, dst: &mut [u32]) -> Result { - assert_eq!(dst.len(), count * SECTOR_SIZE); - let dst = if count > MAX_DMA_SECTORS { &mut dst[..MAX_DMA_SECTORS * SECTOR_SIZE] } else { dst }; - //self.do_dma(blockidx, DMABuffer::new_mut(dst, 32), disk, false); - self.ide_read_secs(self.num, blockidx, dst, count as u8) - } - /// Write ATA DMA. Block size = 512 bytes. - pub fn write(&self, blockidx: u64, count: usize, dst: &[u32]) -> Result { - assert_eq!(dst.len(), count * SECTOR_SIZE); - let dst = if count > MAX_DMA_SECTORS { &dst[..MAX_DMA_SECTORS * SECTOR_SIZE] } else { dst }; - //println!("ide_write_secs: disk={},blockidx={},count={}",disk,blockidx,count); - self.ide_write_secs(self.num, blockidx, dst, count as u8) - } - /// Create structure and init - fn new(num: u8) -> Self { - assert!(num < MAX_IDE as u8); - let ide = DmaController { num }; - ide.ide_init(); +impl IDE { + pub fn new(num: u8) -> Self { + let ide = match num { + 0 => IDE { num: 0, base: 0x1f0, ctrl: 0x3f4 }, + 1 => IDE { num: 1, base: 0x1f0, ctrl: 0x3f4 }, + 2 => IDE { num: 2, base: 0x170, ctrl: 0x374 }, + 3 => IDE { num: 3, base: 0x170, ctrl: 0x374 }, + _ => panic!("ide number should be 0,1,2,3"), + }; + ide.init(); ide } - fn ide_wait_ready(&self, iobase: u16, check_error: usize) -> usize { + /// Read ATA DMA. Block size = 512 bytes. 
+ pub fn read(&self, sector: u64, count: usize, data: &mut [u32]) -> Result<(), ()> { + assert_eq!(data.len(), count * SECTOR_SIZE); + self.wait(); unsafe { - let mut r = port::inb(iobase + ISA_STATUS); - //println!("iobase:{} ready:{}",iobase,r); - while (r & IDE_BSY) > 0 { - r = port::inb(iobase + ISA_STATUS); - //println!("busy"); - } - /* nothing */ - if check_error == 1 && (r & (IDE_DF | IDE_ERR)) != 0 { - return 1; + self.select(sector, count as u8); + port::outb(self.base + ISA_COMMAND, IDE_CMD_READ); + for i in 0..count { + let ptr = &data[(i as usize) * SECTOR_SIZE]; + if self.wait_error() { + return Err(()); + } + asm!("rep insl" :: "{dx}"(self.base), "{rdi}"(ptr), "{cx}"(SECTOR_SIZE) : "rdi" : "volatile"); } } - return 0; + Ok(()) } - - fn ide_init(&self) { - //static_assert((SECTSIZE % 4) == 0); - let ideno = self.num; - //println!("ideno:{}",ideno); - /* assume that no device here */ - //ide_devices[ideno].valid = 0; - - //let iobase = IO_BASE(ideno); - let iobase = CHANNELS[if ideno > 2 { 1 } else { 0 }].0; - - /* wait device ready */ - self.ide_wait_ready(iobase, 0); - //println!("ide_wait_ready"); + /// Write ATA DMA. Block size = 512 bytes. + pub fn write(&self, sector: u64, count: usize, data: &[u32]) -> Result<(), ()> { + assert_eq!(data.len(), count * SECTOR_SIZE); + self.wait(); unsafe { - /* step1: select drive */ - //println!("outb"); - port::outb(iobase + ISA_SDH, (0xE0 | ((ideno & 1) << 4)) as u8); - self.ide_wait_ready(iobase, 0); - - /* step2: send ATA identify command */ - //println!("outb"); - port::outb(iobase + ISA_COMMAND, IDE_CMD_IDENTIFY); - self.ide_wait_ready(iobase, 0); - - /* step3: polling */ - //println!("inb"); - if port::inb(iobase + ISA_STATUS) == 0 || self.ide_wait_ready(iobase, 1) != 0 { - return; - } - - //println!("insl"); - let mut buffer: [u32; 128] = [0; 128]; - for i in 0..buffer.len() { - buffer[i] = i as u32; - if i == 1 { - //println!("{:#x}",&buffer[i] as *const u32 as usize - ::consts::KERNEL_OFFSET) + self.select(sector, count as u8); + port::outb(self.base + ISA_COMMAND, IDE_CMD_WRITE); + for i in 0..count { + let ptr = &data[(i as usize) * SECTOR_SIZE]; + if self.wait_error() { + return Err(()); } - } - //println!("insl {:#x}",&buffer as *const u32 as usize - ::consts::KERNEL_OFFSET); - - //println!("insl {:#x}",buffer.as_ptr() as usize - ::consts::KERNEL_OFFSET); - //port::insl(iobase + ISA_DATA, &mut buffer); - let port = iobase + ISA_DATA; - //let buf=&mut buffer; - for i in 0..buffer.len() { - asm!("insl %dx, (%rdi)" - :: "{dx}"(port), "{rdi}"(&buffer[i]) - : "rdi" : "volatile"); - } - //println!("insl"); - for i in 0..4 { - info!("ide init: {}", buffer[i]); + asm!("rep outsl" :: "{dx}"(self.base), "{rsi}"(ptr), "{cx}"(SECTOR_SIZE) : "rsi" : "volatile"); } } - /* device is ok */ - //ide_devices[ideno].valid = 1; - - /* read identification space of the device */ - /*let buffer[128]; - insl(iobase + ISA_DATA, buffer, sizeof(buffer) / sizeof(unsigned int)); - - unsigned char *ident = (unsigned char *)buffer; - unsigned int sectors; - unsigned int cmdsets = *(unsigned int *)(ident + IDE_IDENT_CMDSETS); - /* device use 48-bits or 28-bits addressing */ - if (cmdsets & (1 << 26)) { - sectors = *(unsigned int *)(ident + IDE_IDENT_MAX_LBA_EXT); - } - else { - sectors = *(unsigned int *)(ident + IDE_IDENT_MAX_LBA); - } - ide_devices[ideno].sets = cmdsets; - ide_devices[ideno].size = sectors; - - /* check if supports LBA */ - assert((*(unsigned short *)(ident + IDE_IDENT_CAPABILITIES) & 0x200) != 0); - - unsigned char *model = 
ide_devices[ideno].model, *data = ident + IDE_IDENT_MODEL; - unsigned int i, length = 40; - for (i = 0; i < length; i += 2) { - model[i] = data[i + 1], model[i + 1] = data[i]; - } - do { - model[i] = '\0'; - } while (i -- > 0 && model[i] == ' '); - - cprintf("ide %d: %10u(sectors), '%s'.\n", ideno, ide_devices[ideno].size, ide_devices[ideno].model);*/ + Ok(()) + } - // enable ide interrupt - //pic_enable(IRQ_IDE1); - //pic_enable(IRQ_IDE2); + fn wait(&self) { + while unsafe { port::inb(self.base + ISA_STATUS) } & IDE_BUSY != 0 {} + } - info!("ide {} init end", self.num); + fn wait_error(&self) -> bool { + self.wait(); + let status = unsafe { port::inb(self.base + ISA_STATUS) }; + status & (IDE_DF | IDE_ERR) != 0 } - fn ide_read_secs<'a>(&'a self, ideno: u8, secno: u64, dst: &'a mut [u32], nsecs: u8) -> Result { - //assert(nsecs <= MAX_NSECS && VALID_IDE(ideno)); - //assert(secno < MAX_DISK_NSECS && secno + nsecs <= MAX_DISK_NSECS); - let iobase = CHANNELS[if ideno > 2 { 1 } else { 0 }].0; - let ioctrl = CHANNELS[if ideno > 2 { 1 } else { 0 }].1; - //ide_wait_ready(iobase, 0); + fn init(&self) { + self.wait(); + unsafe { + // step1: select drive + port::outb(self.base + ISA_SDH, (0xE0 | ((self.num & 1) << 4)) as u8); + self.wait(); - self.ide_wait_ready(iobase, 0); + // step2: send ATA identify command + port::outb(self.base + ISA_COMMAND, IDE_CMD_IDENTIFY); + self.wait(); - let ret = 0; - // generate interrupt - unsafe { - port::outb(ioctrl + ISA_CTRL, 0); - port::outb(iobase + ISA_SECCNT, nsecs); - port::outb(iobase + ISA_SECTOR, (secno & 0xFF) as u8); - port::outb(iobase + ISA_CYL_LO, ((secno >> 8) & 0xFF) as u8); - port::outb(iobase + ISA_CYL_HI, ((secno >> 16) & 0xFF) as u8); - port::outb(iobase + ISA_SDH, 0xE0 | ((ideno & 1) << 4) | (((secno >> 24) & 0xF) as u8)); - //port::outb(iobase + ISA_SDH, (0xE0 | ((ideno & 1) << 4)) as u8); - //self.ide_wait_ready(iobase, 0); - port::outb(iobase + ISA_COMMAND, IDE_CMD_READ); - //self.ide_wait_ready(iobase, 0); - // if port::inb(iobase + ISA_STATUS) == 0 || self.ide_wait_ready(iobase, 1) != 0 { - // println!("error?"); - // } - for i in 0..nsecs { - //dst = dst + SECTSIZE; - let tmp = &mut dst[(i as usize) * SECTOR_SIZE..((i + 1) as usize) * SECTOR_SIZE]; - if self.ide_wait_ready(iobase, 1) != 0 { - println!("wait ready error"); - } - //self.ide_wait_ready(iobase, 1); - //port::insl(iobase, tmp); - let port = iobase; - //let buf=&mut buffer; - for i in 0..tmp.len() { - asm!("insl %dx, (%rdi)" - :: "{dx}"(port), "{rdi}"(&tmp[i]) - : "rdi" : "volatile"); - } - //println!("read :{}",i); + // step3: polling + if port::inb(self.base + ISA_STATUS) == 0 || self.wait_error() { + return; } + + // ??? 
+ let mut data = [0; SECTOR_SIZE]; + asm!("rep insl" :: "{dx}"(self.base + ISA_DATA), "{rdi}"(data.as_ptr()), "{cx}"(SECTOR_SIZE) : "rdi" : "volatile"); } - Ok(ret) } - fn ide_write_secs<'a>(&'a self, ideno: u8, secno: u64, src: &'a [u32], nsecs: u8) -> Result { - //assert(nsecs <= MAX_NSECS && VALID_IDE(ideno)); - //assert(secno < MAX_DISK_NSECS && secno + nsecs <= MAX_DISK_NSECS); - let iobase = CHANNELS[if ideno > 2 { 1 } else { 0 }].0; - let ioctrl = CHANNELS[if ideno > 2 { 1 } else { 0 }].1; - - //ide_wait_ready(iobase, 0); - - self.ide_wait_ready(iobase, 0); - - let ret = 0; - // generate interrupt + fn select(&self, sector: u64, count: u8) { + assert_ne!(count, 0); + self.wait(); unsafe { - port::outb(ioctrl + ISA_CTRL, 0); - port::outb(iobase + ISA_SECCNT, nsecs); - port::outb(iobase + ISA_SECTOR, (secno & 0xFF) as u8); - port::outb(iobase + ISA_CYL_LO, ((secno >> 8) & 0xFF) as u8); - port::outb(iobase + ISA_CYL_HI, ((secno >> 16) & 0xFF) as u8); - port::outb(iobase + ISA_SDH, 0xE0 | ((ideno & 1) << 4) | (((secno >> 24) & 0xF) as u8)); - port::outb(iobase + ISA_COMMAND, IDE_CMD_WRITE); - //println!("{}",nsecs); - for i in 0..nsecs { - //dst = dst + SECTSIZE; - // if ((ret = ide_wait_ready(iobase, 1)) != 0) { - // goto out; - // } - //port::insb(iobase, dst); - //println!("i={}",i); - let tmp = &src[(i as usize) * SECTOR_SIZE..((i + 1) as usize) * SECTOR_SIZE]; - if self.ide_wait_ready(iobase, 1) != 0 { - println!("wait ready error"); - } - //println!("write {}:{}",i,src[i as usize]); - //println!("outsl"); - //port::outsl(iobase, tmp); - let port = iobase; - //let buf=&mut buffer; - for i in 0..tmp.len() { - asm!("outsl (%rsi), %dx" - :: "{dx}"(port), "{rsi}"(&tmp[i]) - : "rsi"); - } - //println!("write :{}",i); - // for i in 0..4 { - // println!("{}",src[i as usize]); - // } - //port::outb(iobase, src[i as usize]); - } + // generate interrupt + port::outb(self.ctrl + ISA_CTRL, 0); + port::outb(self.base + ISA_SECCNT, count); + port::outb(self.base + ISA_SECTOR, (sector & 0xFF) as u8); + port::outb(self.base + ISA_CYL_LO, ((sector >> 8) & 0xFF) as u8); + port::outb(self.base + ISA_CYL_HI, ((sector >> 16) & 0xFF) as u8); + port::outb(self.base + ISA_SDH, 0xE0 | ((self.num & 1) << 4) | (((sector >> 24) & 0xF) as u8)); } - Ok(ret) } } const SECTOR_SIZE: usize = 128; -//const MAX_DMA_SECTORS: usize = 0x2_0000 / SECTOR_SIZE; // Limited by sector count (and PRDT entries) const MAX_DMA_SECTORS: usize = 0x1F_F000 / SECTOR_SIZE; // Limited by sector count (and PRDT entries) // 512 PDRT entries, assume maximum fragmentation = 512 * 4K max = 2^21 = 2MB per transfer -const HDD_PIO_W28: u8 = 0x30; -const HDD_PIO_R28: u8 = 0x20; -const HDD_PIO_W48: u8 = 0x34; -const HDD_PIO_R48: u8 = 0x24; -const HDD_IDENTIFY: u8 = 0xEC; - -const HDD_DMA_R28: u8 = 0xC8; -const HDD_DMA_W28: u8 = 0xCA; -const HDD_DMA_R48: u8 = 0x25; -const HDD_DMA_W48: u8 = 0x35; - const ISA_DATA: u16 = 0x00; const ISA_ERROR: u16 = 0x01; const ISA_PRECOMP: u16 = 0x01; @@ -278,7 +131,7 @@ const ISA_SDH: u16 = 0x06; const ISA_COMMAND: u16 = 0x07; const ISA_STATUS: u16 = 0x07; -const IDE_BSY: u8 = 0x80; +const IDE_BUSY: u8 = 0x80; const IDE_DRDY: u8 = 0x40; const IDE_DF: u8 = 0x20; const IDE_DRQ: u8 = 0x08; @@ -288,33 +141,7 @@ const IDE_CMD_READ: u8 = 0x20; const IDE_CMD_WRITE: u8 = 0x30; const IDE_CMD_IDENTIFY: u8 = 0xEC; -const IDE_IDENT_SECTORS: usize = 20; -const IDE_IDENT_MODEL: usize = 54; -const IDE_IDENT_CAPABILITIES: usize = 98; -const IDE_IDENT_CMDSETS: usize = 164; -const IDE_IDENT_MAX_LBA: usize = 120; -const 
IDE_IDENT_MAX_LBA_EXT: usize = 200; - -const IO_BASE0: u16 = 0x1F0; -const IO_BASE1: u16 = 0x170; -const IO_CTRL0: u16 = 0x3F4; -const IO_CTRL1: u16 = 0x374; - -const MAX_IDE: usize = 4; const MAX_NSECS: usize = 128; -//const MAX_DISK_NSECS 0x10000000U; -//const VALID_IDE(ideno) (((ideno) >= 0) && ((ideno) < MAX_IDE) && (ide_devices[ideno].valid)) - -struct Channels { - base: u16, - // I/O Base - ctrl: u16, // Control Base -} - -const CHANNELS: [(u16, u16); 2] = [(IO_BASE0, IO_CTRL0), (IO_BASE1, IO_CTRL1)]; - -//const IO_BASE(ideno) (CHANNELS[(ideno) >> 1].base) -//const IO_CTRL(ideno) (CHANNELS[(ideno) >> 1].ctrl) mod port { use x86_64::instructions::port::Port; diff --git a/kernel/src/arch/x86_64/driver/mod.rs b/kernel/src/arch/x86_64/driver/mod.rs index 5064353..e2450c6 100644 --- a/kernel/src/arch/x86_64/driver/mod.rs +++ b/kernel/src/arch/x86_64/driver/mod.rs @@ -1,7 +1,6 @@ extern crate syscall as redox_syscall; pub mod vga; -pub mod apic; pub mod serial; pub mod pic; pub mod keyboard; @@ -11,13 +10,12 @@ pub mod ide; pub fn init() { assert_has_not_been_called!(); - if cfg!(feature = "use_apic") { - pic::disable(); - apic::init(); - } else { - pic::init(); - } - pit::init(); + // Use IOAPIC instead of PIC + pic::disable(); + + // Use APIC Timer instead of PIT + // pit::init(); + serial::init(); keyboard::init(); } \ No newline at end of file diff --git a/kernel/src/arch/x86_64/gdt.rs b/kernel/src/arch/x86_64/gdt.rs index 20d0034..620e543 100644 --- a/kernel/src/arch/x86_64/gdt.rs +++ b/kernel/src/arch/x86_64/gdt.rs @@ -1,5 +1,4 @@ use alloc::boxed::Box; -use arch::driver::apic::lapic_id; use consts::MAX_CPU_NUM; use core::fmt; use core::fmt::Debug; @@ -50,7 +49,7 @@ pub fn init() { load_tss(TSS_SELECTOR); } - CPUS[lapic_id() as usize].call_once(|| + CPUS[super::cpu::id() as usize].call_once(|| Mutex::new(Cpu { gdt, tss: unsafe { &mut *tss } })); } @@ -67,7 +66,7 @@ pub struct Cpu { impl Cpu { pub fn current() -> MutexGuard<'static, Cpu> { - CPUS[lapic_id() as usize].try().unwrap().lock() + CPUS[super::cpu::id()].try().unwrap().lock() } /// 设置从Ring3跳到Ring0时,自动切换栈的地址 diff --git a/kernel/src/arch/x86_64/interrupt/handler.rs b/kernel/src/arch/x86_64/interrupt/handler.rs index 9d5b5aa..2b8b188 100644 --- a/kernel/src/arch/x86_64/interrupt/handler.rs +++ b/kernel/src/arch/x86_64/interrupt/handler.rs @@ -72,7 +72,7 @@ global_asm!(include_str!("vector.asm")); #[no_mangle] pub extern fn rust_trap(tf: &mut TrapFrame) { - trace!("Interrupt: {:#x}", tf.trap_num); + trace!("Interrupt: {:#x} @ CPU{}", tf.trap_num, super::super::cpu::id()); // Dispatch match tf.trap_num as u8 { T_BRKPT => breakpoint(), @@ -88,11 +88,7 @@ pub extern fn rust_trap(tf: &mut TrapFrame) { IRQ_IDE => ide(), _ => panic!("Invalid IRQ number: {}", irq), } - #[cfg(feature = "use_apic")] - use arch::driver::apic::ack; - #[cfg(not(feature = "use_apic"))] - use arch::driver::pic::ack; - ack(irq); + super::ack(irq); } T_SWITCH_TOK => to_kernel(tf), T_SWITCH_TOU => to_user(tf), @@ -101,7 +97,6 @@ pub extern fn rust_trap(tf: &mut TrapFrame) { T_DIVIDE | T_GPFLT | T_ILLOP => error(tf), _ => panic!("Unhandled interrupt {:x}", tf.trap_num), } - ::trap::before_return(); } fn breakpoint() { diff --git a/kernel/src/arch/x86_64/interrupt/mod.rs b/kernel/src/arch/x86_64/interrupt/mod.rs index a1b9ecb..2e62cdb 100644 --- a/kernel/src/arch/x86_64/interrupt/mod.rs +++ b/kernel/src/arch/x86_64/interrupt/mod.rs @@ -1,5 +1,4 @@ use x86_64; -use arch::driver::{apic::IOAPIC, pic}; pub mod consts; mod handler; @@ -7,6 +6,8 @@ mod trapframe; pub use 
self::trapframe::*; pub use self::handler::*; +use super::apic::*; +use consts::KERNEL_OFFSET; #[inline(always)] pub unsafe fn enable() { @@ -39,9 +40,12 @@ pub fn no_interrupt(f: impl FnOnce()) { #[inline(always)] pub fn enable_irq(irq: u8) { - if cfg!(feature = "use_apic") { - IOAPIC.lock().enable(irq, 0); - } else { - pic::enable_irq(irq); - } + let mut ioapic = unsafe { IoApic::new(KERNEL_OFFSET + IOAPIC_ADDR as usize) }; + ioapic.enable(irq, 0); +} + +#[inline(always)] +pub fn ack(irq: u8) { + let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) }; + lapic.eoi(); } \ No newline at end of file diff --git a/kernel/src/arch/x86_64/memory.rs b/kernel/src/arch/x86_64/memory.rs index 54bd283..e0fcf30 100644 --- a/kernel/src/arch/x86_64/memory.rs +++ b/kernel/src/arch/x86_64/memory.rs @@ -1,14 +1,15 @@ use bit_allocator::BitAlloc; use consts::KERNEL_OFFSET; // Depends on kernel -use memory::{FRAME_ALLOCATOR, init_heap}; +use memory::{FRAME_ALLOCATOR, init_heap, active_table}; use super::{BootInfo, MemoryRegionType}; use ucore_memory::PAGE_SIZE; -use ucore_memory::paging::PageTable; +use ucore_memory::paging::*; pub fn init(boot_info: &BootInfo) { assert_has_not_been_called!("memory::init must be called only once"); init_frame_allocator(boot_info); + init_device_vm_map(); init_heap(); info!("memory: init end"); } @@ -22,3 +23,11 @@ fn init_frame_allocator(boot_info: &BootInfo) { } } } + +fn init_device_vm_map() { + let mut page_table = active_table(); + // IOAPIC + page_table.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000).update(); + // LocalAPIC + page_table.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000).update(); +} diff --git a/kernel/src/arch/x86_64/mod.rs b/kernel/src/arch/x86_64/mod.rs index d1868f7..b739925 100644 --- a/kernel/src/arch/x86_64/mod.rs +++ b/kernel/src/arch/x86_64/mod.rs @@ -1,6 +1,10 @@ extern crate bootloader; +extern crate apic; +extern crate raw_cpuid; use self::bootloader::bootinfo::{BootInfo, MemoryRegionType}; +use core::sync::atomic::*; +use consts::KERNEL_OFFSET; pub mod driver; pub mod cpu; @@ -8,14 +12,23 @@ pub mod interrupt; pub mod paging; pub mod gdt; pub mod idt; -// TODO: Move multi-core init to bootloader -//pub mod smp; pub mod memory; pub mod io; +pub mod consts; + +static AP_CAN_INIT: AtomicBool = ATOMIC_BOOL_INIT; /// The entry point of kernel #[no_mangle] // don't mangle the name of this function pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! { + let cpu_id = cpu::id(); + println!("Hello world! from CPU {}!", cpu_id); + + if cpu_id != 0 { + while !AP_CAN_INIT.load(Ordering::Relaxed) {} + other_start(); + } + // First init log mod, so that we can print log info. ::logging::init(); info!("Hello world!"); @@ -30,20 +43,22 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! { // Now heap is available gdt::init(); + cpu::init(); + driver::init(); + ::process::init(); + ::thread::spawn(::fs::shell); + + AP_CAN_INIT.store(true, Ordering::Relaxed); + ::kmain(); } -/// The entry point for another processors -#[no_mangle] -pub extern "C" fn other_main() -> ! { +/// The entry point for other processors +fn other_start() -> ! { idt::init(); gdt::init(); - driver::apic::other_init(); - let cpu_id = driver::apic::lapic_id(); -// let ms = unsafe { smp::notify_started(cpu_id) }; - println!("Hello world! 
from CPU {}!", cpu_id); -// unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault - loop {} + cpu::init(); + ::kmain(); } \ No newline at end of file diff --git a/kernel/src/arch/x86_64/paging.rs b/kernel/src/arch/x86_64/paging.rs index 8717703..8838dce 100644 --- a/kernel/src/arch/x86_64/paging.rs +++ b/kernel/src/arch/x86_64/paging.rs @@ -1,6 +1,6 @@ use bit_allocator::{BitAlloc, BitAlloc64K}; // Depends on kernel -use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame}; +use memory::{active_table, alloc_frame, dealloc_frame}; use spin::{Mutex, MutexGuard}; use ucore_memory::cow::CowExt; use ucore_memory::memory_set::*; @@ -49,7 +49,7 @@ impl PageTable for ActivePageTable { let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE; self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86) .unwrap().flush(); - self.get_entry(addr) + unsafe { &mut *(get_entry_ptr(addr, 1)) } } fn unmap(&mut self, addr: usize) { @@ -57,9 +57,12 @@ impl PageTable for ActivePageTable { flush.flush(); } - fn get_entry(&mut self, addr: usize) -> &mut PageEntry { - let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000; - unsafe { &mut *(entry_addr as *mut PageEntry) } + fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> { + for level in 0..3 { + let entry = get_entry_ptr(addr, 4 - level); + if unsafe { !(*entry).present() } { return None; } + } + unsafe { Some(&mut *(get_entry_ptr(addr, 1))) } } fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] { @@ -140,6 +143,12 @@ impl Entry for PageEntry { fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); } } +fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry { + debug_assert!(level <= 4); + let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1); + entry_addr as *mut PageEntry +} + impl PageEntry { fn as_flags(&mut self) -> &mut EF { unsafe { &mut *(self as *mut _ as *mut EF) } @@ -222,10 +231,6 @@ impl InactivePageTable for InactivePageTable0 { fn dealloc_frame(target: usize) { dealloc_frame(target) } - - fn alloc_stack() -> Stack { - alloc_stack() - } } impl InactivePageTable0 { diff --git a/kernel/src/consts.rs b/kernel/src/consts.rs index be597e0..ea22abd 100644 --- a/kernel/src/consts.rs +++ b/kernel/src/consts.rs @@ -1,133 +1,6 @@ #![allow(dead_code)] -#[cfg(target_arch = "riscv32")] -pub use self::riscv::*; -#[cfg(target_arch = "x86_64")] -pub use self::x86_64::*; +pub use arch::consts::*; pub const MAX_CPU_NUM: usize = 8; pub const MAX_PROCESS_NUM: usize = 48; - -// Memory address for riscv32 -#[cfg(target_arch = "riscv32")] -mod riscv { - // Physical address available on THINPAD: - // [0x80000000, 0x80800000] - const P2_SIZE: usize = 1 << 22; - const P2_MASK: usize = 0x3ff << 22; - // RECURSIVE_PAGE_PML4 indicate the index of the self-maping entry in root pagetable - pub const RECURSIVE_PAGE_PML4: usize = 0x3fe; - // KERNEL_OFFSET indicate (virtual kernel address - physical kernel address) ??? 
- pub const KERNEL_OFFSET: usize = 0; - // KERNEL_PML4 indicate the index of the kernel entry in root pagetable - pub const KERNEL_PML4: usize = 0x8000_0000 >> 22; - pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000; - pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000; - pub const MEMORY_OFFSET: usize = 0x8000_0000; - pub const MEMORY_END: usize = 0x8080_0000; - pub const USER_STACK_OFFSET: usize = 0x70000000; - pub const USER_STACK_SIZE: usize = 0x10000; - pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET; -} - -// Memory address for x86_64 -#[cfg(target_arch = "x86_64")] -mod x86_64 { - // Copy from Redox consts.rs: - - // Because the memory map is so important to not be aliased, it is defined here, in one place - // The lower 256 PML4 entries are reserved for userspace - // Each PML4 entry references up to 512 GB of memory - // The top (511) PML4 is reserved for recursive mapping - // The second from the top (510) PML4 is reserved for the kernel - /// The size of a single PML4 - pub const PML4_SIZE: usize = 0x0000_0080_0000_0000; - pub const PML4_MASK: usize = 0x0000_ff80_0000_0000; - - /// Offset of recursive paging - pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize; - pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset of kernel - pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE; - pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE; - - pub const KERNEL_SIZE: usize = PML4_SIZE; - - /// Offset to kernel heap - pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE; - pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; - /// Size of kernel heap - pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB - - pub const MEMORY_OFFSET: usize = 0; - - /// Offset to kernel percpu variables - //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE; - pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000; - /// Size of kernel percpu variables - pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB - - /// Offset to user image - pub const USER_OFFSET: usize = 0; - pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user TCB - pub const USER_TCB_OFFSET: usize = 0xB000_0000; - - /// Offset to user arguments - pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2; - - /// Offset to user heap - pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE; - pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user grants - pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE; - pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user stack - pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE; - pub const USER32_STACK_OFFSET: usize = 0xB000_0000; - pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE; - /// Size of user stack - pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB - - /// Offset to user sigstack - pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE; - pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE; - /// Size of user sigstack - pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB - - /// Offset to user TLS - pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE; - pub const USER_TLS_PML4: usize = 
(USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary image (used when cloning) - pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE; - pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary heap (used when cloning) - pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE; - pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary page for grants - pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE; - pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary stack (used when cloning) - pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE; - pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary sigstack (used when cloning) - pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE; - pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset to user temporary tls (used when cloning) - pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE; - pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE; - - /// Offset for usage in other temporary pages - pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE; - pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE; -} \ No newline at end of file diff --git a/kernel/src/fs.rs b/kernel/src/fs.rs index 748cc12..d52bad0 100644 --- a/kernel/src/fs.rs +++ b/kernel/src/fs.rs @@ -48,8 +48,9 @@ pub fn shell() { if let Ok(file) = root.borrow().lookup(name.as_str()) { use process::*; let len = file.borrow().read_at(0, &mut *buf).unwrap(); - let pid = processor().add(Context::new_user(&buf[..len])); - processor().current_wait_for(pid); + let pid = processor().manager().add(ContextImpl::new_user(&buf[..len])); + processor().manager().wait(thread::current().id(), pid); + processor().yield_now(); } else { println!("Program not exist"); } diff --git a/kernel/src/lang.rs b/kernel/src/lang.rs index a289ccc..43730f0 100644 --- a/kernel/src/lang.rs +++ b/kernel/src/lang.rs @@ -13,7 +13,8 @@ pub fn panic(info: &PanicInfo) -> ! { let location = info.location().unwrap(); let message = info.message().unwrap(); error!("\n\nPANIC in {} at line {}\n {}", location.file(), location.line(), message); - loop { } + use arch::cpu::halt; + loop { halt() } } #[lang = "oom"] diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 28ef150..9c54064 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -9,6 +9,8 @@ #![feature(panic_info_message)] #![feature(global_asm)] #![feature(compiler_builtins_lib)] +#![feature(raw)] +#![feature(vec_resize_default)] #![no_std] @@ -34,6 +36,8 @@ extern crate volatile; extern crate x86_64; extern crate xmas_elf; +pub use process::{processor, new_kernel_context}; +use ucore_process::thread; use linked_list_allocator::LockedHeap; #[macro_use] // print! @@ -45,8 +49,6 @@ mod consts; mod process; mod syscall; mod fs; - -use process::{thread, thread_}; mod sync; mod trap; mod console; @@ -61,22 +63,13 @@ pub mod arch; pub mod arch; pub fn kmain() -> ! 
{ - // Init the first kernel process(idle proc) - process::init(); - // enable the interrupt - unsafe { arch::interrupt::enable(); } - - // the test is not supported in riscv32(maybe) - //thread::test::local_key(); - //thread::test::unpack(); - //sync::test::philosopher_using_mutex(); - //sync::test::philosopher_using_monitor(); - //sync::mpsc::test::test_all(); - - // come into shell - fs::shell(); + process::processor().run(); - loop {} +// thread::test::local_key(); +// thread::test::unpack(); +// sync::test::philosopher_using_mutex(); +// sync::test::philosopher_using_monitor(); +// sync::mpsc::test::test_all(); } /// Global heap allocator diff --git a/kernel/src/memory.rs b/kernel/src/memory.rs index 23070e9..8e1e68b 100644 --- a/kernel/src/memory.rs +++ b/kernel/src/memory.rs @@ -7,7 +7,7 @@ use ucore_memory::{*, paging::PageTable}; use ucore_memory::cow::CowExt; pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack}; use ucore_memory::swap::*; -use process::processor; +use process::{processor, process}; pub type MemorySet = MemorySet_; @@ -73,6 +73,26 @@ pub fn alloc_stack() -> Stack { Stack { top, bottom } } +pub struct KernelStack(usize); +const STACK_SIZE: usize = 0x8000; + +impl KernelStack { + pub fn new() -> Self { + use alloc::alloc::{alloc, Layout}; + let bottom = unsafe{ alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize; + KernelStack(bottom) + } + pub fn top(&self) -> usize { + self.0 + STACK_SIZE + } +} + +impl Drop for KernelStack { + fn drop(&mut self) { + use alloc::alloc::{dealloc, Layout}; + unsafe{ dealloc(self.0 as _, Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()); } + } +} /* @@ -92,8 +112,7 @@ pub fn page_fault_handler(addr: usize) -> bool { // handle the swap in/out info!("start handling swap in/out page fault"); unsafe { ACTIVE_TABLE_SWAP.force_unlock(); } - let mut temp_proc = processor(); - let pt = temp_proc.current_context_mut().get_memory_set_mut().get_page_table_mut(); + let pt = process().get_memory_set_mut().get_page_table_mut(); if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, || alloc_frame().unwrap()){ return true; } diff --git a/kernel/src/process/context.rs b/kernel/src/process/context.rs index e289efb..4062c7e 100644 --- a/kernel/src/process/context.rs +++ b/kernel/src/process/context.rs @@ -1,66 +1,47 @@ use arch::interrupt::{TrapFrame, Context as ArchContext}; -use memory::{MemoryArea, MemoryAttr, MemorySet, active_table_swap, alloc_frame}; +use memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame}; use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type}}; use core::fmt::{Debug, Error, Formatter}; +use ucore_process::Context; +use alloc::boxed::Box; use ucore_memory::{Page}; use ::memory::{InactivePageTable0}; -pub struct Context { +pub struct ContextImpl { arch: ArchContext, memory_set: MemorySet, + kstack: KernelStack, } -impl ::ucore_process::processor::Context for Context { - /* - * @param: - * target: the target process context - * @brief: - * switch to the target process context - */ - unsafe fn switch(&mut self, target: &mut Self) { - super::PROCESSOR.try().unwrap().force_unlock(); +impl Context for ContextImpl { + unsafe fn switch_to(&mut self, target: &mut Context) { + use core::mem::transmute; + let (target, _): (&mut ContextImpl, *const ()) = transmute(target); self.arch.switch(&mut target.arch); - use core::mem::forget; - // don't run the distructor of processor() - 
forget(super::processor()); - } - - /* - * @param: - * entry: the program entry for the process - * arg: a0 (a parameter) - * @brief: - * new a kernel thread Context - * @retval: - * the new kernel thread Context - */ - fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Self { - let ms = MemorySet::new(); - Context { - arch: unsafe { ArchContext::new_kernel_thread(entry, arg, ms.kstack_top(), ms.token()) }, - memory_set: ms, - } } } -impl Context { - pub unsafe fn new_init() -> Self { - Context { +impl ContextImpl { + pub unsafe fn new_init() -> Box { + Box::new(ContextImpl { arch: ArchContext::null(), memory_set: MemorySet::new(), - } + kstack: KernelStack::new(), + }) + } + + pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box { + let memory_set = MemorySet::new(); + let kstack = KernelStack::new(); + Box::new(ContextImpl { + arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) }, + memory_set, + kstack, + }) } /// Make a new user thread from ELF data - /* - * @param: - * data: the ELF data stream - * @brief: - * make a new thread from ELF data - * @retval: - * the new user thread Context - */ - pub fn new_user(data: &[u8]) -> Self { + pub fn new_user(data: &[u8]) -> Box { // Parse elf let elf = ElfFile::new(data).expect("failed to read elf"); let is32 = match elf.header.pt2 { @@ -106,22 +87,23 @@ impl Context { } }); } + // map the memory set swappable + //memory_set_map_swappable(&mut memory_set); - - //set the user Memory pages in the memory set swappable - memory_set_map_swappable(&mut memory_set); + let kstack = KernelStack::new(); - Context { + Box::new(ContextImpl { arch: unsafe { ArchContext::new_user_thread( - entry_addr, user_stack_top - 8, memory_set.kstack_top(), is32, memory_set.token()) + entry_addr, user_stack_top - 8, kstack.top(), is32, memory_set.token()) }, memory_set, - } + kstack, + }) } /// Fork - pub fn fork(&self, tf: &TrapFrame) -> Self { + pub fn fork(&self, tf: &TrapFrame) -> Box { // Clone memory set, make a new page table let mut memory_set = self.memory_set.clone(); @@ -140,12 +122,15 @@ impl Context { }); } // map the memory set swappable - memory_set_map_swappable(&mut memory_set); + //memory_set_map_swappable(&mut memory_set); - Context { - arch: unsafe { ArchContext::new_fork(tf, memory_set.kstack_top(), memory_set.token()) }, + let kstack = KernelStack::new(); + + Box::new(ContextImpl { + arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) }, memory_set, - } + kstack, + }) } pub fn get_memory_set_mut(&mut self) -> &mut MemorySet { @@ -153,11 +138,11 @@ impl Context { } } - -impl Drop for Context{ +/* +impl Drop for ContextImpl{ fn drop(&mut self){ //set the user Memory pages in the memory set unswappable - let Self {ref mut arch, ref mut memory_set} = self; + let Self {ref mut arch, ref mut memory_set, ref mut kstack} = self; let pt = { memory_set.get_page_table_mut() as *mut InactivePageTable0 }; @@ -173,8 +158,8 @@ impl Drop for Context{ } } - -impl Debug for Context { +*/ +impl Debug for ContextImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { write!(f, "{:x?}", self.arch) } @@ -196,7 +181,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet { } let (virt_addr, mem_size, flags) = match ph { ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags), - ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),//??? 
+ ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags), }; set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), "")); @@ -210,7 +195,7 @@ fn memory_attr_from(elf_flags: Flags) -> MemoryAttr { if elf_flags.is_execute() { flags = flags.execute(); } flags } - +/* /* * @param: * memory_set: the target MemorySet to set swappable @@ -228,4 +213,5 @@ fn memory_set_map_swappable(memory_set: &mut MemorySet){ } } info!("Finishing setting pages swappable"); -} \ No newline at end of file +} +*/ diff --git a/kernel/src/process/mod.rs b/kernel/src/process/mod.rs index 41355f4..8531278 100644 --- a/kernel/src/process/mod.rs +++ b/kernel/src/process/mod.rs @@ -1,57 +1,94 @@ -use spin::Once; -use sync::{SpinNoIrqLock, Mutex, MutexGuard, SpinNoIrq}; -pub use self::context::Context; -pub use ucore_process::processor::{*, Context as _whatever}; -pub use ucore_process::scheduler::*; -pub use ucore_process::thread::*; +use spin::Mutex; +pub use self::context::ContextImpl; +pub use ucore_process::*; +use consts::{MAX_CPU_NUM, MAX_PROCESS_NUM}; +use arch::cpu; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use sync::Condvar; +use core::sync::atomic::*; mod context; -type Processor = Processor_; - -/* -* @brief: -* initialize a new kernel process (idleproc) -*/ pub fn init() { - PROCESSOR.call_once(|| - SpinNoIrqLock::new({ - let mut processor = Processor::new( - unsafe { Context::new_init() }, - // NOTE: max_time_slice <= 5 to ensure 'priority' test pass - StrideScheduler::new(5), - ); - extern fn idle(arg: usize) -> ! { - loop {} - } - processor.add(Context::new_kernel(idle, 0)); - processor - }) - ); + // NOTE: max_time_slice <= 5 to ensure 'priority' test pass + let scheduler = Box::new(scheduler::RRScheduler::new(5)); + let manager = Arc::new(ProcessManager::new(scheduler, MAX_PROCESS_NUM)); + + extern fn idle(_arg: usize) -> ! { + loop { cpu::halt(); } + } + for i in 0..4 { + manager.add(ContextImpl::new_kernel(idle, i)); + } + + unsafe { + for cpu_id in 0..MAX_CPU_NUM { + PROCESSORS[cpu_id].init(cpu_id, ContextImpl::new_init(), manager.clone()); + } + } info!("process init end"); } -pub static PROCESSOR: Once> = Once::new(); +static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()]; + +/// Ugly solution for sys_wait(0) (wait for any child) +#[derive(Default)] +pub struct Process { + parent: AtomicUsize, + children: Mutex>, + subproc_exit: Condvar, // Trigger parent's when exit +} -pub fn processor() -> MutexGuard<'static, Processor, SpinNoIrq> { - PROCESSOR.try().unwrap().lock() +impl Process { + pub fn new_fork(pid: usize, parent: usize) { + PROCESS[pid].parent.store(parent, Ordering::Relaxed); + PROCESS[pid].subproc_exit._clear(); + PROCESS[parent].children.lock().push(pid); + } + pub fn proc_exit(pid: usize) { + let parent = PROCESS[pid].parent.load(Ordering::Relaxed); + PROCESS[parent].subproc_exit.notify_all(); + } + pub fn wait_child() { + Self::current().subproc_exit._wait(); + } + pub fn get_children() -> Vec { + Self::current().children.lock().clone() + } + pub fn do_wait(pid: usize) { + Self::current().children.lock().retain(|&p| p != pid); + } + fn current() -> &'static Self { + &PROCESS[thread::current().id()] + } } -#[allow(non_camel_case_types)] -pub type thread = ThreadMod; +lazy_static! 
{ + pub static ref PROCESS: Vec = { + let mut vec = Vec::new(); + vec.resize_default(MAX_PROCESS_NUM); + vec + }; +} -pub mod thread_ { - pub type Thread = super::Thread; +/// Get current thread struct +pub fn process() -> &'static mut ContextImpl { + use core::mem::transmute; + let (process, _): (&mut ContextImpl, *const ()) = unsafe { + transmute(processor().context()) + }; + process } -pub struct ThreadSupportImpl; -impl ThreadSupport for ThreadSupportImpl { - type Context = Context; - type Scheduler = StrideScheduler; - type ProcessorGuard = MutexGuard<'static, Processor, SpinNoIrq>; +// Implement dependencies for std::thread - fn processor() -> Self::ProcessorGuard { - processor() - } +#[no_mangle] +pub fn processor() -> &'static Processor { + &PROCESSORS[cpu::id()] +} + +#[no_mangle] +pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box { + ContextImpl::new_kernel(entry, arg) } \ No newline at end of file diff --git a/kernel/src/smp.rs b/kernel/src/smp.rs new file mode 100644 index 0000000..3439d17 --- /dev/null +++ b/kernel/src/smp.rs @@ -0,0 +1,4 @@ +pub struct cpu { + pub id: usize +} + diff --git a/kernel/src/sync/condvar.rs b/kernel/src/sync/condvar.rs index 4dbbc78..98d5675 100644 --- a/kernel/src/sync/condvar.rs +++ b/kernel/src/sync/condvar.rs @@ -1,11 +1,10 @@ use alloc::collections::VecDeque; use super::*; use thread; -use thread_; #[derive(Default)] pub struct Condvar { - wait_queue: SpinNoIrqLock>, + wait_queue: SpinNoIrqLock>, } impl Condvar { @@ -34,4 +33,7 @@ impl Condvar { t.unpark(); } } + pub fn _clear(&self) { + self.wait_queue.lock().clear(); + } } \ No newline at end of file diff --git a/kernel/src/syscall.rs b/kernel/src/syscall.rs index 80a170c..79c4ea4 100644 --- a/kernel/src/syscall.rs +++ b/kernel/src/syscall.rs @@ -58,25 +58,45 @@ fn sys_close(fd: usize) -> i32 { /// Fork the current process. Return the child's PID. fn sys_fork(tf: &TrapFrame) -> i32 { - let mut processor = processor(); - let context = processor.current_context().fork(tf); - let pid = processor.add(context); - info!("fork: {} -> {}", processor.current_pid(), pid); + let mut context = process().fork(tf); + let pid = processor().manager().add(context); + Process::new_fork(pid, thread::current().id()); + info!("fork: {} -> {}", thread::current().id(), pid); pid as i32 } /// Wait the process exit. /// Return the PID. Store exit code to `code` if it's not null. 
fn sys_wait(pid: usize, code: *mut i32) -> i32 { - let mut processor = processor(); - match processor.current_wait_for(pid) { - WaitResult::Ok(pid, error_code) => { - if !code.is_null() { - unsafe { *code = error_code as i32 }; + loop { + let wait_procs = match pid { + 0 => Process::get_children(), + _ => vec![pid], + }; + if wait_procs.is_empty() { + return -1; + } + for pid in wait_procs { + match processor().manager().get_status(pid) { + Some(Status::Exited(exit_code)) => { + if !code.is_null() { + unsafe { code.write(exit_code as i32); } + } + processor().manager().remove(pid); + Process::do_wait(pid); + info!("wait: {} -> {}", thread::current().id(), pid); + return 0; + } + None => return -1, + _ => {} } - 0 } - WaitResult::NotExist => -1, + if pid == 0 { + Process::wait_child(); + } else { + processor().manager().wait(thread::current().id(), pid); + processor().yield_now(); + } } } @@ -87,7 +107,11 @@ fn sys_yield() -> i32 { /// Kill the process fn sys_kill(pid: usize) -> i32 { - processor().kill(pid); + processor().manager().exit(pid, 0x100); + Process::proc_exit(pid); + if pid == thread::current().id() { + processor().yield_now(); + } 0 } @@ -97,11 +121,12 @@ fn sys_getpid() -> i32 { } /// Exit the current process -fn sys_exit(error_code: usize) -> i32 { - let mut processor = processor(); - let pid = processor.current_pid(); - processor.exit(pid, error_code); - 0 +fn sys_exit(exit_code: usize) -> i32 { + let pid = thread::current().id(); + processor().manager().exit(pid, exit_code); + Process::proc_exit(pid); + processor().yield_now(); + unreachable!(); } fn sys_sleep(time: usize) -> i32 { @@ -111,13 +136,12 @@ fn sys_sleep(time: usize) -> i32 { } fn sys_get_time() -> i32 { - let processor = processor(); - processor.get_time() as i32 + unsafe { ::trap::TICK as i32 } } fn sys_lab6_set_priority(priority: usize) -> i32 { - let mut processor = processor(); - processor.set_priority(priority as u8); + let pid = thread::current().id(); + processor().manager().set_priority(pid, priority as u8); 0 } diff --git a/kernel/src/trap.rs b/kernel/src/trap.rs index 5430e43..529fd20 100644 --- a/kernel/src/trap.rs +++ b/kernel/src/trap.rs @@ -1,36 +1,22 @@ use process::*; use arch::interrupt::TrapFrame; +use arch::cpu; -/* -* @brief: -* process timer interrupt -*/ -pub fn timer() { - let mut processor = processor(); - processor.tick(); -} +pub static mut TICK: usize = 0; -pub fn before_return() { - if let Some(processor) = PROCESSOR.try() { - processor.lock().schedule(); +pub fn timer() { + processor().tick(); + if cpu::id() == 0 { + unsafe { TICK += 1; } } } -/* -* @param: -* TrapFrame: the error's trapframe -* @brief: -* process the error trap, if processor inited then exit else panic! -*/ pub fn error(tf: &TrapFrame) -> ! { - if let Some(processor) = PROCESSOR.try() { - let mut processor = processor.lock(); - let pid = processor.current_pid(); - error!("Process {} error:\n{:#x?}", pid, tf); - processor.exit(pid, 0x100); // TODO: Exit code for error - processor.schedule(); - unreachable!(); - } else { - panic!("Exception when processor not inited\n{:#x?}", tf); - } + error!("{:#x?}", tf); + let pid = processor().pid(); + error!("On CPU{} Process {}", cpu::id(), pid); + + processor().manager().exit(pid, 0x100); + processor().yield_now(); + unreachable!(); } \ No newline at end of file
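
A note on the new get_entry_ptr() in kernel/src/arch/x86_64/paging.rs: it computes the virtual address of a level-N page-table entry through the recursive mapping, generalizing the old hard-coded level-1 expression ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000. The standalone sketch below reproduces the arithmetic on a hosted 64-bit target, assuming the recursive mapping stays in PML4 slot 511 as in the previous consts; the pte_vaddr name and the main() harness are illustrative only and not part of the patch.

    // Sketch of the recursive-mapping arithmetic behind get_entry_ptr(addr, level).
    // Shifting right by 9*level drops the lowest `level` index fields, which has the
    // same effect as walking through the recursive slot `level` times; `& !0x7` aligns
    // to an 8-byte entry and the final OR restores the sign-extended slot-511 prefix.
    fn pte_vaddr(addr: usize, level: u8) -> usize {
        debug_assert!(level >= 1 && level <= 4);
        ((addr >> (level * 9)) & !0x7) | !((1usize << (48 - 9 * level as usize)) - 1)
    }

    fn main() {
        // Level-1 (PT) entry for KERNEL_OFFSET = 0xffff_ff00_0000_0000: it lands inside
        // the 0xffff_ff80_0000_0000 recursive window, where the old constant pointed.
        assert_eq!(pte_vaddr(0xffff_ff00_0000_0000, 1), 0xffff_ffff_8000_0000);
    }

For level = 1 this matches the old expression, and the Option-returning get_entry() can probe levels 4 down to 2 for present() before touching the level-1 entry, which is exactly what the new implementation does.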
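
The reworked sys_wait in kernel/src/syscall.rs is a check-then-block loop: inspect the candidate children, reap one if it already exited, otherwise block (on Process::wait_child() for pid 0, or on the manager's wait queue otherwise) and re-check after being woken. A rough hosted sketch of that loop shape, written against std's Mutex/Condvar rather than the kernel's Condvar, with illustrative names (Children, reap_any) that are not part of the patch:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;

    // Illustrative only: the "wait for any child" pattern that sys_wait(0) builds from
    // Process::wait_child() and Process::proc_exit(), expressed with std primitives.
    struct Children {
        exited: Mutex<Vec<usize>>, // pids of children that have already exited
        subproc_exit: Condvar,     // notified by an exiting child
    }

    impl Children {
        fn reap_any(&self) -> usize {
            let mut exited = self.exited.lock().unwrap();
            loop {
                if let Some(pid) = exited.pop() {
                    return pid; // a child exited before (or while) we were waiting
                }
                // Nothing to reap yet: release the lock, sleep until proc_exit()
                // notifies, then re-check the predicate (wakeups may be spurious).
                exited = self.subproc_exit.wait(exited).unwrap();
            }
        }

        fn proc_exit(&self, pid: usize) {
            self.exited.lock().unwrap().push(pid);
            self.subproc_exit.notify_all();
        }
    }

    fn main() {
        let parent = Arc::new(Children {
            exited: Mutex::new(Vec::new()),
            subproc_exit: Condvar::new(),
        });
        let child = parent.clone();
        thread::spawn(move || child.proc_exit(42));
        assert_eq!(parent.reap_any(), 42);
    }

The kernel loop re-checks through processor().manager().get_status() instead of a shared Vec, which is why it also handles the Some(Status::Exited) and None cases before deciding to sleep.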