Merge conflict

master
lcy1996 6 years ago
commit fcdee71f9d

@@ -1,13 +1,5 @@
 //! Port from sbi.h
-//!
-//! This code is used for OS to use hardware outside with calling these implements
-/*
-**  @brief  translate implement calling message to RISCV asm
-**  @param  which: usize             ecall type
-**          arg0, arg1, arg2: usize  ecall args
-**  @retval ret: usize  the result of the asm
-*/
 #[inline(always)]
 fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
     let ret;
@@ -21,38 +13,18 @@ fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
     ret
 }
-/*
-**  @brief  output char to console
-**  @param  ch: usize  the char to output to console
-**  @retval none
-*/
 pub fn console_putchar(ch: usize) {
     sbi_call(SBI_CONSOLE_PUTCHAR, ch, 0, 0);
 }
-/*
-**  @brief  input char from console
-**  @param  none
-**  @retval ch: usize  the char get from console
-*/
 pub fn console_getchar() -> usize {
     sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
 }
-/*
-**  @brief  call this function to shutdown
-**  @param  none
-**  @retval none
-*/
 pub fn shutdown() {
     sbi_call(SBI_SHUTDOWN, 0, 0, 0);
 }
-/*
-**  @brief  set a timer when running
-**  @param  stime_value: u64  time to be set
-**  @retval none
-*/
 pub fn set_timer(stime_value: u64) {
     #[cfg(target_pointer_width = "32")]
     sbi_call(SBI_SET_TIMER, stime_value as usize, (stime_value >> 32) as usize, 0);
@@ -60,49 +32,24 @@ pub fn set_timer(stime_value: u64) {
     sbi_call(SBI_SET_TIMER, stime_value as usize, 0, 0);
 }
-/*
-**  @brief  clear the ipi
-**  @param  none
-**  @retval none
-*/
 pub fn clear_ipi() {
     sbi_call(SBI_CLEAR_IPI, 0, 0, 0);
 }
-/*
-**  @brief
-**  @param
-**  @retval none
-*/
-pub fn send_ipi(hart_mask: *const usize) {
-    sbi_call(SBI_SEND_IPI, hart_mask as usize, 0, 0);
+pub fn send_ipi(hart_mask: usize) {
+    sbi_call(SBI_SEND_IPI, &hart_mask as *const _ as usize, 0, 0);
 }
-/*
-**  @brief
-**  @param
-**  @retval none
-*/
-pub fn remote_fence_i(hart_mask: *const usize) {
-    sbi_call(SBI_REMOTE_FENCE_I, hart_mask as usize, 0, 0);
+pub fn remote_fence_i(hart_mask: usize) {
+    sbi_call(SBI_REMOTE_FENCE_I, &hart_mask as *const _ as usize, 0, 0);
 }
-/*
-**  @brief
-**  @param
-**  @retval none
-*/
-pub fn remote_sfence_vma(hart_mask: *const usize, _start: usize, _size: usize) {
-    sbi_call(SBI_REMOTE_SFENCE_VMA, hart_mask as usize, 0, 0);
+pub fn remote_sfence_vma(hart_mask: usize, _start: usize, _size: usize) {
+    sbi_call(SBI_REMOTE_SFENCE_VMA, &hart_mask as *const _ as usize, 0, 0);
 }
-/*
-**  @brief
-**  @param
-**  @retval none
-*/
-pub fn remote_sfence_vma_asid(hart_mask: *const usize, _start: usize, _size: usize, _asid: usize) {
-    sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask as usize, 0, 0);
+pub fn remote_sfence_vma_asid(hart_mask: usize, _start: usize, _size: usize, _asid: usize) {
+    sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, &hart_mask as *const _ as usize, 0, 0);
 }
 const SBI_SET_TIMER: usize = 0;
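The IPI/fence wrappers above change from taking `*const usize` to taking the mask by value and passing `&hart_mask as *const _ as usize`, because the legacy SBI expects a0 to hold a pointer to the hart mask. A minimal host-runnable sketch of that convention (not kernel code; `fake_sbi_send_ipi` is a hypothetical stand-in for the machine-mode handler):

fn fake_sbi_send_ipi(hart_mask_ptr: usize) -> Vec<usize> {
    // The handler dereferences the pointer to recover the mask ...
    let mask = unsafe { *(hart_mask_ptr as *const usize) };
    // ... then collects the hart ids whose bit is set.
    (0usize..64).filter(|i| (mask >> i) & 1 != 0).collect()
}

fn send_ipi(hart_mask: usize) -> Vec<usize> {
    // Mirrors the new signature: mask by value, address of the local passed down.
    fake_sbi_send_ipi(&hart_mask as *const _ as usize)
}

fn main() {
    // IPI to harts 0 and 2; masks are built as `1 << cpu_id` bits.
    assert_eq!(send_ipi(0b101), vec![0usize, 2]);
    println!("harts signalled: {:?}", send_ipi(0b101));
}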

@@ -5,3 +5,4 @@ authors = ["WangRunji <wangrunji0408@163.com>"]
 [dependencies]
 log = "0.4"
+spin = "0.4"

@@ -1,18 +1,25 @@
 #![no_std]
 #![feature(alloc)]
 #![feature(const_fn)]
+#![feature(linkage)]
+#![feature(nll)]
+#![feature(vec_resize_default)]
 
 extern crate alloc;
 #[macro_use]
 extern crate log;
+extern crate spin;
 
 // To use `println!` in test
 #[cfg(test)]
 #[macro_use]
 extern crate std;
 
-pub mod processor;
+mod process_manager;
+mod processor;
 pub mod scheduler;
 pub mod thread;
-mod util;
 mod event_hub;
+
+pub use process_manager::*;
+pub use processor::Processor;

@@ -0,0 +1,196 @@
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use spin::Mutex;
+use scheduler::Scheduler;
+use core::cell::UnsafeCell;
+use alloc::vec::Vec;
+use event_hub::EventHub;
+
+struct Process {
+    id: Pid,
+    status: Status,
+    status_after_stop: Status,
+    context: Option<Box<Context>>,
+}
+
+pub type Pid = usize;
+type ExitCode = usize;
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum Status {
+    Ready,
+    Running(usize),
+    Sleeping,
+    Waiting(Pid),
+    /// aka ZOMBIE. Its context was dropped.
+    Exited(ExitCode),
+}
+
+enum Event {
+    Wakeup(Pid),
+}
+
+pub trait Context {
+    unsafe fn switch_to(&mut self, target: &mut Context);
+}
+
+pub struct ProcessManager {
+    procs: Vec<Mutex<Option<Process>>>,
+    scheduler: Mutex<Box<Scheduler>>,
+    wait_queue: Vec<Mutex<Vec<Pid>>>,
+    event_hub: Mutex<EventHub<Event>>,
+}
+
+impl ProcessManager {
+    pub fn new(scheduler: Box<Scheduler>, max_proc_num: usize) -> Self {
+        ProcessManager {
+            procs: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            scheduler: Mutex::new(scheduler),
+            wait_queue: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            event_hub: Mutex::new(EventHub::new()),
+        }
+    }
+
+    fn alloc_pid(&self) -> Pid {
+        for (i, proc) in self.procs.iter().enumerate() {
+            if proc.lock().is_none() {
+                return i;
+            }
+        }
+        panic!("Process number exceeded");
+    }
+
+    /// Add a new process
+    pub fn add(&self, context: Box<Context>) -> Pid {
+        let pid = self.alloc_pid();
+        *(&self.procs[pid]).lock() = Some(Process {
+            id: pid,
+            status: Status::Ready,
+            status_after_stop: Status::Ready,
+            context: Some(context),
+        });
+        self.scheduler.lock().insert(pid);
+        pid
+    }
+
+    /// Make process `pid` time slice -= 1.
+    /// Return true if time slice == 0.
+    /// Called by timer interrupt handler.
+    pub fn tick(&self, pid: Pid) -> bool {
+        let mut event_hub = self.event_hub.lock();
+        event_hub.tick();
+        while let Some(event) = event_hub.pop() {
+            match event {
+                Event::Wakeup(pid) => self.set_status(pid, Status::Ready),
+            }
+        }
+        self.scheduler.lock().tick(pid)
+    }
+
+    /// Set the priority of process `pid`
+    pub fn set_priority(&self, pid: Pid, priority: u8) {
+        self.scheduler.lock().set_priority(pid, priority);
+    }
+
+    /// Called by Processor to get a process to run.
+    /// The manager first mark it `Running`,
+    /// then take out and return its Context.
+    pub fn run(&self, cpu_id: usize) -> (Pid, Box<Context>) {
+        let mut scheduler = self.scheduler.lock();
+        let pid = scheduler.select()
+            .expect("failed to select a runnable process");
+        scheduler.remove(pid);
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = Status::Running(cpu_id);
+        (pid, proc.context.take().unwrap())
+    }
+
+    /// Called by Processor to finish running a process
+    /// and give its context back.
+    pub fn stop(&self, pid: Pid, context: Box<Context>) {
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = proc.status_after_stop.clone();
+        proc.status_after_stop = Status::Ready;
+        proc.context = Some(context);
+        match proc.status {
+            Status::Ready => self.scheduler.lock().insert(pid),
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+    /// Switch the status of a process.
+    /// Insert/Remove it to/from scheduler if necessary.
+    fn set_status(&self, pid: Pid, status: Status) {
+        let mut scheduler = self.scheduler.lock();
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        trace!("process {} {:?} -> {:?}", pid, proc.status, status);
+        match (&proc.status, &status) {
+            (Status::Ready, Status::Ready) => return,
+            (Status::Ready, _) => scheduler.remove(pid),
+            (Status::Running(_), _) => {},
+            (Status::Exited(_), _) => panic!("can not set status for a exited process"),
+            (Status::Waiting(target), Status::Exited(_)) =>
+                self.wait_queue[*target].lock().retain(|&i| i != pid),
+            // TODO: Sleep -> Exited  Remove wakeup event.
+            (_, Status::Ready) => scheduler.insert(pid),
+            _ => {}
+        }
+        match proc.status {
+            Status::Running(_) => proc.status_after_stop = status,
+            _ => proc.status = status,
+        }
+        match proc.status {
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+    pub fn get_status(&self, pid: Pid) -> Option<Status> {
+        self.procs[pid].lock().as_ref().map(|p| p.status.clone())
+    }
+
+    pub fn remove(&self, pid: Pid) {
+        let mut proc_lock = self.procs[pid].lock();
+        let proc = proc_lock.as_ref().unwrap();
+        match proc.status {
+            Status::Exited(_) => *proc_lock = None,
+            _ => panic!("can not remove non-exited process"),
+        }
+    }
+
+    pub fn sleep(&self, pid: Pid, time: usize) {
+        self.set_status(pid, Status::Sleeping);
+        if time != 0 {
+            self.event_hub.lock().push(time, Event::Wakeup(pid));
+        }
+    }
+
+    pub fn wakeup(&self, pid: Pid) {
+        self.set_status(pid, Status::Ready);
+    }
+
+    pub fn wait(&self, pid: Pid, target: Pid) {
+        self.set_status(pid, Status::Waiting(target));
+        self.wait_queue[target].lock().push(pid);
+    }
+
+    pub fn exit(&self, pid: Pid, code: ExitCode) {
+        self.set_status(pid, Status::Exited(code));
+        for waiter in self.wait_queue[pid].lock().drain(..) {
+            self.wakeup(waiter);
+        }
+    }
+}
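The `wait`/`exit` pair above uses a per-pid wait queue: `wait` records the waiter under the target's queue, and `exit` drains that queue and wakes everyone in it. A simplified host-runnable model of just that hand-off (std `Mutex` in place of spin, statuses reduced to a plain enum — an illustration, not the real `ProcessManager`):

use std::sync::Mutex;

type Pid = usize;

#[derive(Debug, PartialEq)]
enum Status { Ready, Waiting(Pid), Exited(usize) }

struct Manager {
    status: Vec<Mutex<Status>>,
    wait_queue: Vec<Mutex<Vec<Pid>>>,
}

impl Manager {
    fn wait(&self, pid: Pid, target: Pid) {
        *self.status[pid].lock().unwrap() = Status::Waiting(target);
        self.wait_queue[target].lock().unwrap().push(pid);
    }
    fn exit(&self, pid: Pid, code: usize) {
        *self.status[pid].lock().unwrap() = Status::Exited(code);
        // Wake every process that was waiting on `pid`.
        for waiter in self.wait_queue[pid].lock().unwrap().drain(..) {
            *self.status[waiter].lock().unwrap() = Status::Ready;
        }
    }
}

fn main() {
    let m = Manager {
        status: (0..3).map(|_| Mutex::new(Status::Ready)).collect(),
        wait_queue: (0..3).map(|_| Mutex::new(Vec::new())).collect(),
    };
    m.wait(1, 2); // pid 1 blocks on pid 2
    m.exit(2, 0); // pid 2 exits, draining its wait queue
    assert_eq!(*m.status[1].lock().unwrap(), Status::Ready);
}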

@@ -1,400 +1,95 @@
-use alloc::{boxed::Box, collections::BTreeMap};
-use scheduler::*;
-use event_hub::EventHub;
-use util::GetMut2;
-use core::fmt::Debug;
-
-#[derive(Debug)]
-pub struct Process<T> {
-    pid: Pid,
-    parent: Pid,
-    status: Status,
-    context: T,
-}
-
-pub type Pid = usize;
-pub type ErrorCode = usize;
-
-#[derive(Debug, Clone, Eq, PartialEq)]
-pub enum Status {
-    Ready,
-    Running,
-    Waiting(Pid),
-    Sleeping,
-    Exited(ErrorCode),
-}
-
-pub trait Context: Debug {
-    unsafe fn switch(&mut self, target: &mut Self);
-    fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Self;
-}
-
-pub struct Processor_<T: Context, S: Scheduler> {
-    procs: BTreeMap<Pid, Process<T>>,
-    current_pid: Pid,
-    event_hub: EventHub<Event>,
-    /// Choose what on next schedule ?
-    next: Option<Pid>,
-    // WARNING: if MAX_PROCESS_NUM is too large, will cause stack overflow
-    scheduler: S,
-}
-
-impl<T> Process<T> {
-    fn exit_code(&self) -> Option<ErrorCode> {
-        match self.status {
-            Status::Exited(code) => Some(code),
-            _ => None,
-        }
-    }
-}
-
-// TODO: functions other than schedule() should only set the process status, not call schedule()
-impl<T: Context, S: Scheduler> Processor_<T, S> {
-    /*
-    **  @brief  create a new Processor
-    **  @param  init_context: T  initiate context
-    **          scheduler: S     the scheduler to use
-    **  @retval the Processor created
-    */
-    pub fn new(init_context: T, scheduler: S) -> Self {
-        let init_proc = Process {
-            pid: 0,
-            parent: 0,
-            status: Status::Running,
-            context: init_context,
-        };
-        Processor_ {
-            procs: {
-                let mut map = BTreeMap::<Pid, Process<T>>::new();
-                map.insert(0, init_proc);
-                map
-            },
-            current_pid: 0,
-            event_hub: EventHub::new(),
-            next: None,
-            scheduler,
-        }
-    }
-    /*
-    **  @brief  set the priority of current process
-    **  @param  priority: u8  the priority to set
-    **  @retval none
-    */
-    pub fn set_priority(&mut self, priority: u8) {
-        self.scheduler.set_priority(self.current_pid, priority);
-    }
-    /*
-    **  @brief  mark the current process to reschedule
-    **  @param  none
-    **  @retval none
-    */
-    pub fn set_reschedule(&mut self) {
-        let pid = self.current_pid;
-        self.set_status(pid, Status::Ready);
-    }
-    /*
-    **  @brief  allocate the pid of the process
-    **  @param  none
-    **  @retval the pid allocated
-    */
-    fn alloc_pid(&self) -> Pid {
-        let mut next: Pid = 0;
-        for &i in self.procs.keys() {
-            if i != next {
-                return next;
-            } else {
-                next = i + 1;
-            }
-        }
-        return next;
-    }
-    /*
-    **  @brief  set the status of the process
-    **  @param  pid: Pid        the pid of process which needs to be set
-    **          status: Status  the status to be set
-    **  @retval none
-    */
-    fn set_status(&mut self, pid: Pid, status: Status) {
-        let status0 = self.get(pid).status.clone();
-        match (&status0, &status) {
-            (&Status::Ready, &Status::Ready) => return,
-            (&Status::Ready, _) => self.scheduler.remove(pid),
-            (_, &Status::Ready) => self.scheduler.insert(pid),
-            _ => {}
-        }
-        trace!("process {} {:?} -> {:?}", pid, status0, status);
-        self.get_mut(pid).status = status;
-    }
-    /*
-    **  @brief  Called by timer.
-    **          Handle events.
-    **  @param  none
-    **  @retval none
-    */
-    pub fn tick(&mut self) {
-        let current_pid = self.current_pid;
-        if self.scheduler.tick(current_pid) {
-            self.set_reschedule();
-        }
-        self.event_hub.tick();
-        while let Some(event) = self.event_hub.pop() {
-            debug!("event {:?}", event);
-            match event {
-                Event::Schedule => {
-                    self.event_hub.push(10, Event::Schedule);
-                    self.set_reschedule();
-                },
-                Event::Wakeup(pid) => {
-                    self.set_status(pid, Status::Ready);
-                    self.set_reschedule();
-                    self.next = Some(pid);
-                },
-            }
-        }
-    }
-    /*
-    **  @brief  get now time
-    **  @param  none
-    **  @retval the time got
-    */
-    pub fn get_time(&self) -> usize {
-        self.event_hub.get_time()
-    }
-    /*
-    **  @brief  add a new process
-    **  @param  context: T  the context fo the process
-    **  @retval the pid of new process
-    */
-    pub fn add(&mut self, context: T) -> Pid {
-        let pid = self.alloc_pid();
-        let process = Process {
-            pid,
-            parent: self.current_pid,
-            status: Status::Ready,
-            context,
-        };
-        self.scheduler.insert(pid);
-        self.procs.insert(pid, process);
-        pid
-    }
-    /*
-    **  @brief  Called every interrupt end
-    **          Do schedule ONLY IF current status != Running
-    **  @param  none
-    **  @retval none
-    */
-    pub fn schedule(&mut self) {
-        if self.get(self.current_pid).status == Status::Running {
-            return;
-        }
-        let pid = self.next.take().unwrap_or_else(|| self.scheduler.select().unwrap());
-        self.switch_to(pid);
-    }
-    /*
-    **  @brief  Switch process to `pid`, switch page table if necessary.
-    **          Store `rsp` and point it to target kernel stack.
-    **          The current status must be set before, and not be `Running`.
-    **  @param  the pid of the process to switch
-    **  @retval none
-    */
-    fn switch_to(&mut self, pid: Pid) {
-        // for debug print
-        let pid0 = self.current_pid;
-        if pid == self.current_pid {
-            if self.get(self.current_pid).status != Status::Running {
-                self.set_status(pid, Status::Running);
-            }
-            return;
-        }
-        self.current_pid = pid;
-        let (from, to) = self.procs.get_mut2(pid0, pid);
-        assert_ne!(from.status, Status::Running);
-        assert_eq!(to.status, Status::Ready);
-        to.status = Status::Running;
-        self.scheduler.remove(pid);
-        //info!("switch from {} to {} {:x?}", pid0, pid, to.context);
-        unsafe { from.context.switch(&mut to.context); }
-    }
-    /*
-    **  @brief  get process by pid
-    **  @param  pid: Pid  the pid of the process
-    **  @retval the process struct
-    */
-    fn get(&self, pid: Pid) -> &Process<T> {
-        self.procs.get(&pid).unwrap()
-    }
-    /*
-    **  @brief  get mut process struct by pid
-    **  @param  pid: Pid  the pid of the process
-    **  @retval the mut process struct
-    */
-    fn get_mut(&mut self, pid: Pid) -> &mut Process<T> {
-        self.procs.get_mut(&pid).unwrap()
-    }
-    /*
-    **  @brief  get context of current process
-    **  @param  none
-    **  @retval current context
-    */
-    pub fn current_context(&self) -> &T {
-        &self.get(self.current_pid).context
-    }
-    pub fn current_context_mut(&mut self) -> &mut T {
-        let id = self.current_pid;
-        &mut self.get_mut(id).context
-    }
-    pub fn get_context_mut(&mut self, pid: Pid) -> &mut T {
-        &mut self.get_mut(pid).context
-    }
-    /*
-    **  @brief  get pid of current process
-    **  @param  none
-    **  @retval current pid
-    */
-    pub fn current_pid(&self) -> Pid {
-        self.current_pid
-    }
-    /*
-    **  @brief  kill a process by pid
-    **  @param  pid: Pid  the pid of the process to kill
-    **  @retval none
-    */
-    pub fn kill(&mut self, pid: Pid) {
-        self.exit(pid, 0x1000); // TODO: error code for killed
-    }
-    /*
-    **  @brief  exit a process by pid
-    **  @param  pid: Pid               the pid to exit
-    **          error_code: ErrorCode  the error code when exiting
-    **  @retval none
-    */
-    pub fn exit(&mut self, pid: Pid, error_code: ErrorCode) {
-        info!("{} exit, code: {}", pid, error_code);
-        self.set_status(pid, Status::Exited(error_code));
-        if let Some(waiter) = self.find_waiter(pid) {
-            info!(" then wakeup {}", waiter);
-            self.set_status(waiter, Status::Ready);
-            self.next = Some(waiter);
-        }
-    }
-    /*
-    **  @brief  let a process to sleep for a while
-    **  @param  pid: Pid     the pid of the process to sleep
-    **          time: usize  the time to sleep
-    **  @retval none
-    */
-    pub fn sleep(&mut self, pid: Pid, time: usize) {
-        self.set_status(pid, Status::Sleeping);
-        self.event_hub.push(time, Event::Wakeup(pid));
-    }
-    /*
-    **  @brief  let a process to sleep until wake up
-    **  @param  pid: Pid  the pid of the process to sleep
-    **  @retval none
-    */
-    pub fn sleep_(&mut self, pid: Pid) {
-        self.set_status(pid, Status::Sleeping);
-    }
-    /*
-    **  @brief  wake up al sleeping process
-    **  @param  pid: Pid  the pid of the process to wake up
-    **  @retval none
-    */
-    pub fn wakeup_(&mut self, pid: Pid) {
-        self.set_status(pid, Status::Ready);
-    }
-    /*
-    **  @brief  Let current process wait for another
-    **  @param  pid: Pid  the pid of the process to wait for
-    **  @retval the result of wait
-    */
-    pub fn current_wait_for(&mut self, pid: Pid) -> WaitResult {
-        info!("current {} wait for {:?}", self.current_pid, pid);
-        if self.procs.values().filter(|&p| p.parent == self.current_pid).next().is_none() {
-            return WaitResult::NotExist;
-        }
-        let pid = self.try_wait(pid).unwrap_or_else(|| {
-            let current_pid = self.current_pid;
-            self.set_status(current_pid, Status::Waiting(pid));
-            self.schedule(); // yield
-            self.try_wait(pid).unwrap()
-        });
-        let exit_code = self.get(pid).exit_code().unwrap();
-        info!("{} wait end and remove {}", self.current_pid, pid);
-        self.procs.remove(&pid);
-        WaitResult::Ok(pid, exit_code)
-    }
-    /*
-    **  @brief  Try to find a exited wait target
-    **  @param  pid: Pid  the pid of the process to wait for
-    **  @retval the pid found or none
-    */
-    fn try_wait(&mut self, pid: Pid) -> Option<Pid> {
-        match pid {
-            0 => self.procs.values()
-                .find(|&p| p.parent == self.current_pid && p.exit_code().is_some())
-                .map(|p| p.pid),
-            _ => self.get(pid).exit_code().map(|_| pid),
-        }
-    }
-    /*
-    **  @brief  find one process which is waiting for the input process
-    **  @param  pid: Pid  the pid of the target process
-    **  @retval the pid of the waiting process or none
-    */
-    fn find_waiter(&self, pid: Pid) -> Option<Pid> {
-        self.procs.values().find(|&p| {
-            p.status == Status::Waiting(pid) ||
-            (p.status == Status::Waiting(0) && self.get(pid).parent == p.pid)
-        }).map(|ref p| p.pid)
-    }
-}
-
-#[derive(Debug)]
-pub enum WaitResult {
-    /// The target process is exited with `ErrorCode`.
-    Ok(Pid, ErrorCode),
-    /// The target process is not exist.
-    NotExist,
-}
-
-#[derive(Debug)]
-enum Event {
-    Schedule,
-    Wakeup(Pid),
-}
-
-impl<T: Context> GetMut2<Pid> for BTreeMap<Pid, Process<T>> {
-    type Output = Process<T>;
-    fn get_mut(&mut self, id: Pid) -> &mut Process<T> {
-        self.get_mut(&id).unwrap()
-    }
-}
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use spin::Mutex;
+use core::cell::UnsafeCell;
+use process_manager::*;
+
+/// Process executor
+///
+/// Per-CPU struct. Defined at global.
+/// Only accessed by associated CPU with interrupt disabled.
+#[derive(Default)]
+pub struct Processor {
+    inner: UnsafeCell<Option<ProcessorInner>>,
+}
+
+unsafe impl Sync for Processor {}
+
+struct ProcessorInner {
+    id: usize,
+    proc: Option<(Pid, Box<Context>)>,
+    loop_context: Box<Context>,
+    manager: Arc<ProcessManager>,
+}
+
+impl Processor {
+    pub const fn new() -> Self {
+        Processor { inner: UnsafeCell::new(None) }
+    }
+
+    pub unsafe fn init(&self, id: usize, context: Box<Context>, manager: Arc<ProcessManager>) {
+        unsafe {
+            *self.inner.get() = Some(ProcessorInner {
+                id,
+                proc: None,
+                loop_context: context,
+                manager,
+            });
+        }
+    }
+
+    fn inner(&self) -> &mut ProcessorInner {
+        unsafe { &mut *self.inner.get() }.as_mut()
+            .expect("Processor is not initialized")
+    }
+
+    /// Begin running processes after CPU setup.
+    ///
+    /// This function never returns. It loops, doing:
+    /// - choose a process to run
+    /// - switch to start running that process
+    /// - eventually that process transfers control
+    ///   via switch back to the scheduler.
+    pub fn run(&self) -> ! {
+        let inner = self.inner();
+        loop {
+            let proc = inner.manager.run(inner.id);
+            trace!("CPU{} begin running process {}", inner.id, proc.0);
+            inner.proc = Some(proc);
+            unsafe {
+                inner.loop_context.switch_to(&mut *inner.proc.as_mut().unwrap().1);
+            }
+            let (pid, context) = inner.proc.take().unwrap();
+            trace!("CPU{} stop running process {}", inner.id, pid);
+            inner.manager.stop(pid, context);
+        }
+    }
+
+    /// Called by process running on this Processor.
+    /// Yield and reschedule.
+    pub fn yield_now(&self) {
+        let inner = self.inner();
+        unsafe {
+            inner.proc.as_mut().unwrap().1.switch_to(&mut *inner.loop_context);
+        }
+    }
+
+    pub fn pid(&self) -> Pid {
+        self.inner().proc.as_ref().unwrap().0
+    }
+
+    pub fn context(&self) -> &Context {
+        &*self.inner().proc.as_ref().unwrap().1
+    }
+
+    pub fn manager(&self) -> &ProcessManager {
+        &*self.inner().manager
+    }
+
+    pub fn tick(&self) {
+        let need_reschedule = self.manager().tick(self.pid());
+        if need_reschedule {
+            self.yield_now();
+        }
+    }
+}

@@ -2,51 +2,19 @@ use alloc::{collections::BinaryHeap, vec::Vec};
 type Pid = usize;
 
-///
-// implements of process scheduler
 pub trait Scheduler {
-    /*
-    **  @brief  add a new process
-    **  @param  pid: Pid  the pid of the process to add
-    **  @retval none
-    */
     fn insert(&mut self, pid: Pid);
-    /*
-    **  @brief  remove a processs from the list
-    **  @param  pid: Pid  the pid of the process to remove
-    **  @retval none
-    */
     fn remove(&mut self, pid: Pid);
-    /*
-    **  @brief  choose a process to run next
-    **  @param  none
-    **  @retval Option<Pid>  the pid of the process to run or none
-    */
    fn select(&mut self) -> Option<Pid>;
-    /*
-    **  @brief  when a clock interrupt occurs, update the list and check whether need to reschedule
-    **  @param  current: Pid  the pid of the process which is running now
-    **  @retval bool  if need to reschedule
-    */
    fn tick(&mut self, current: Pid) -> bool;   // need reschedule?
-    /*
-    **  @brief  set the priority of the process
-    **  @param  pid: Pid      the pid of the process to be set
-    **          priority: u8  the priority to be set
-    **  @retval none
-    */
    fn set_priority(&mut self, pid: Pid, priority: u8);
+    fn move_to_head(&mut self, pid: Pid);
 }
 
 pub use self::rr::RRScheduler;
 pub use self::stride::StrideScheduler;
 
-// use round-robin scheduling
 mod rr {
     use super::*;
@@ -112,6 +80,14 @@ mod rr {
         fn set_priority(&mut self, pid: usize, priority: u8) {
         }
+
+        fn move_to_head(&mut self, pid: usize) {
+            let pid = pid + 1;
+            assert!(self.infos[pid].present);
+            self._list_remove(pid);
+            self._list_add_after(pid, 0);
+            trace!("rr move_to_head {}", pid - 1);
+        }
     }
 
     impl RRScheduler {
@@ -128,6 +104,10 @@ mod rr {
            self.infos[prev].next = i;
            self.infos[at].prev = i;
        }
+        fn _list_add_after(&mut self, i: Pid, at: Pid) {
+            let next = self.infos[at].next;
+            self._list_add_before(i, next);
+        }
        fn _list_remove(&mut self, i: Pid) {
            let next = self.infos[i].next;
            let prev = self.infos[i].prev;
@@ -139,7 +119,6 @@ mod rr {
    }
 }
 
-// use stride scheduling
 mod stride {
     use super::*;
@@ -190,6 +169,9 @@ mod stride {
            let info = &mut self.infos[pid];
            assert!(info.present);
            info.present = false;
+            if self.queue.peek().is_some() && self.queue.peek().unwrap().1 == pid {
+                self.queue.pop();
+            } else {
                // BinaryHeap only support pop the top.
                // So in order to remove an arbitrary element,
                // we have to take all elements into a Vec,
@@ -197,6 +179,7 @@ mod stride {
                let rest: Vec<_> = self.queue.drain().filter(|&p| p.1 != pid).collect();
                use core::iter::FromIterator;
                self.queue = BinaryHeap::from_iter(rest.into_iter());
+            }
            trace!("stride remove {}", pid);
        }
@@ -229,6 +212,15 @@ mod stride {
            self.infos[pid].priority = priority;
            trace!("stride {} priority = {}", pid, priority);
        }
+
+        fn move_to_head(&mut self, pid: Pid) {
+            if self.queue.peek().is_some() {
+                let stride = -self.queue.peek().unwrap().0;
+                self.remove(pid);
+                self.infos[pid].stride = stride;
+                self.insert(pid);
+            }
+        }
    }
 
    impl StrideScheduler {
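The new `move_to_head` in the stride scheduler works because the ready queue is a max-heap over (negated stride, pid): re-inserting a pid with the current minimum stride makes `select` return it next. A host-runnable sketch of that trick (plain std `BinaryHeap`, not the scheduler itself):

use std::collections::BinaryHeap;

fn main() {
    // (negated stride, pid): BinaryHeap pops the largest, i.e. smallest stride.
    let mut queue: BinaryHeap<(i64, usize)> = BinaryHeap::new();
    queue.push((-10, 1));
    queue.push((-20, 2));
    queue.push((-30, 3));
    assert_eq!(queue.peek(), Some(&(-10, 1))); // pid 1 would run next

    // "move_to_head(3)": remove pid 3, give it the head's stride, re-insert.
    let head_stride = queue.peek().unwrap().0;
    let rest: Vec<_> = queue.drain().filter(|&(_, pid)| pid != 3).collect();
    queue = rest.into_iter().collect();
    queue.push((head_stride, 3));

    // pid 3 now ties with the old head and is selected first on pop.
    assert_eq!(queue.pop().unwrap().1, 3);
}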

@@ -1,199 +1,165 @@
 //! Thread std-like interface
 //!
-//! Based on Processor.
-//! Used in the kernel.
+//! Based on Processor. Used in kernel.
 //!
-//! # Example
-//!
-//! ```
-//! // Define a support implementation struct
-//! pub struct ThreadSupportImpl;
-//!
-//! // Impl `ThreadSupport` trait
-//! impl ThreadSupport for ThreadSupportImpl { ... }
-//!
-//! // Export the full struct as `thread`.
-//! #[allow(non_camel_case_types)]
-//! pub type thread = ThreadMod<ThreadSupportImpl>;
-//! ```
-//!
-//! ```
-//! // Use it just like `std::thread`
-//! use thread;
-//! let t = thread::current();
-//!
-//! // But the other struct is not available ...
-//! let t: thread::Thread; // ERROR!
-//! ```
+//! You need to implement the following functions before use:
+//! - `processor`: Get a reference of the current `Processor`
+//! - `new_kernel_context`: Construct a `Context` of the new kernel thread
 
 use alloc::boxed::Box;
 use alloc::collections::BTreeMap;
+use core::any::Any;
 use core::marker::PhantomData;
 use core::ptr;
 use core::time::Duration;
-use core::ops::DerefMut;
 use processor::*;
+use process_manager::*;
 use scheduler::Scheduler;
 
-/// All dependencies for thread mod.
-pub trait ThreadSupport {
-    type Context: Context;
-    type Scheduler: Scheduler;
-    type ProcessorGuard: DerefMut<Target=Processor_<Self::Context, Self::Scheduler>>;
-    fn processor() -> Self::ProcessorGuard;
-}
+#[linkage = "weak"]
+#[no_mangle]
+/// Get a reference of the current `Processor`
+fn processor() -> &'static Processor {
+    unimplemented!("thread: Please implement and export `processor`")
+}
 
-/// Root structure served as thread mod
-pub struct ThreadMod<S: ThreadSupport> {
-    mark: PhantomData<S>
-}
+#[linkage = "weak"]
+#[no_mangle]
+/// Construct a `Context` of the new kernel thread
+fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
+    unimplemented!("thread: Please implement and export `new_kernel_context`")
+}
 
-impl<S: ThreadSupport> ThreadMod<S> {
-    /*
-    **  @brief  Gets a handle to the thread that invokes it.
-    **  @param  none
-    **  @retval the thread to get
-    */
-    pub fn current() -> Thread<S> {
-        Thread {
-            pid: S::processor().current_pid(),
-            mark: PhantomData,
-        }
-    }
+/// Gets a handle to the thread that invokes it.
+pub fn current() -> Thread {
+    Thread {
+        pid: processor().pid(),
+    }
+}
 
-    /*
-    **  @brief  Puts the current thread to sleep for the specified amount of time.
-    **  @param  dur: Duration  the time to sleep
-    **  @retval none
-    */
-    pub fn sleep(dur: Duration) {
-        let time = dur_to_ticks(dur);
-        info!("sleep: {:?} ticks", time);
-        let mut processor = S::processor();
-        let pid = processor.current_pid();
-        processor.sleep(pid, time);
-        processor.schedule();
-
-        fn dur_to_ticks(dur: Duration) -> usize {
-            return dur.as_secs() as usize * 100 + dur.subsec_nanos() as usize / 10_000_000;
-        }
-    }
+/// Puts the current thread to sleep for the specified amount of time.
+pub fn sleep(dur: Duration) {
+    let time = dur_to_ticks(dur);
+    info!("sleep: {:?} ticks", time);
+    processor().manager().sleep(current().id(), time);
+    park();
+
+    fn dur_to_ticks(dur: Duration) -> usize {
+        return dur.as_secs() as usize * 100 + dur.subsec_nanos() as usize / 10_000_000;
+    }
+}
 
-    /*
-    **  @brief  Spawns a new thread, returning a JoinHandle for it.
-    **  @param  f: F  the thread to start
-    **  @retval JoinHandle  the JoinHandle of the new thread
-    */
-    pub fn spawn<F, T>(f: F) -> JoinHandle<S, T>
-        where
-            F: Send + 'static + FnOnce() -> T,
-            T: Send + 'static,
-    {
-        info!("spawn:");
-        let f = Box::into_raw(Box::new(f));
-        let pid = S::processor().add(Context::new_kernel(kernel_thread_entry::<S, F, T>, f as usize));
-        return JoinHandle {
-            thread: Thread { pid, mark: PhantomData },
-            mark: PhantomData,
-        };
-
-        extern fn kernel_thread_entry<S, F, T>(f: usize) -> !
-            where
-                S: ThreadSupport,
-                F: Send + 'static + FnOnce() -> T,
-                T: Send + 'static,
-        {
-            let f = unsafe { Box::from_raw(f as *mut F) };
-            let ret = Box::new(f());
-            // unsafe { LocalKey::<usize>::get_map() }.clear();
-            let mut processor = S::processor();
-            let pid = processor.current_pid();
-            processor.exit(pid, Box::into_raw(ret) as usize);
-            processor.schedule();
-            unreachable!()
-        }
-    }
+/// Spawns a new thread, returning a JoinHandle for it.
+///
+/// `F`: Type of the function `f`
+/// `T`: Type of the return value of `f`
+pub fn spawn<F, T>(f: F) -> JoinHandle<T>
+    where
+        F: Send + 'static + FnOnce() -> T,
+        T: Send + 'static,
+{
+    info!("spawn:");
+
+    // Note the problem here:
+    // the Processor can only create a new thread from an entry address plus one
+    // `usize` argument, but we need it to run a closure `f` of unknown type.
+
+    // First, place the function itself (code and captured data) on the heap.
+    let f = Box::into_raw(Box::new(f));
+
+    // Define a static function as the entry point of the new thread;
+    // its argument is the pointer to `f` on the heap.
+    // This is how we smuggle `f` into a static function.
+    //
+    // Note that it is generic: since the type `F` is unique per call site,
+    // every `spawn` call instantiates a fresh `kernel_thread_entry`.
+    extern fn kernel_thread_entry<F, T>(f: usize) -> !
+        where
+            F: Send + 'static + FnOnce() -> T,
+            T: Send + 'static,
+    {
+        // Inside the static function:
+        // recover `f` from the pointer passed in.
+        let f = unsafe { Box::from_raw(f as *mut F) };
+        // Call `f`, and put its return value on the heap as well.
+        let ret = Box::new(f());
+        // Clean up thread-local storage.
+        // unsafe { LocalKey::<usize>::get_map() }.clear();
+        // Let the Processor exit the current thread, passing the heap pointer
+        // of f's return value out as the thread exit code.
+        let exit_code = Box::into_raw(ret) as usize;
+        processor().manager().exit(current().id(), exit_code);
+        processor().yield_now();
+        // We will never be scheduled back here.
+        unreachable!()
+    }
+
+    // Create the new thread in the Processor.
+    let context = new_kernel_context(kernel_thread_entry::<F, T>, f as usize);
+    let pid = processor().manager().add(context);
+
+    // See `JoinHandle::join()` below for how f's return value is recovered.
+    return JoinHandle {
+        thread: Thread { pid },
+        mark: PhantomData,
+    };
+}
 
-    /*
-    **  @brief  Cooperatively gives up a timeslice to the OS scheduler.
-    **  @param  none
-    **  @retval none
-    */
-    pub fn yield_now() {
-        info!("yield:");
-        let mut processor = S::processor();
-        processor.set_reschedule();
-        processor.schedule();
-    }
+/// Cooperatively gives up a timeslice to the OS scheduler.
+pub fn yield_now() {
+    info!("yield:");
+    processor().yield_now();
+}
 
-    /*
-    **  @brief  Blocks unless or until the current thread's token is made available.
-    **  @param  none
-    **  @retval none
-    */
-    pub fn park() {
-        info!("park:");
-        let mut processor = S::processor();
-        let pid = processor.current_pid();
-        processor.sleep_(pid);
-        processor.schedule();
-    }
-}
+/// Blocks unless or until the current thread's token is made available.
+pub fn park() {
+    info!("park:");
+    processor().manager().sleep(current().id(), 0);
+    processor().yield_now();
+}
 
 /// A handle to a thread.
-pub struct Thread<S: ThreadSupport> {
+pub struct Thread {
     pid: usize,
-    mark: PhantomData<S>,
 }
 
-impl<S: ThreadSupport> Thread<S> {
-    /*
-    **  @brief  Atomically makes the handle's token available if it is not already.
-    **  @param  none
-    **  @retval none
-    */
+impl Thread {
+    /// Atomically makes the handle's token available if it is not already.
     pub fn unpark(&self) {
-        let mut processor = S::processor();
-        processor.wakeup_(self.pid);
+        processor().manager().wakeup(self.pid);
     }
-    /*
-    **  @brief  Gets the thread's unique identifier.
-    **  @param  none
-    **  @retval usize  the the thread's unique identifier
-    */
+    /// Gets the thread's unique identifier.
     pub fn id(&self) -> usize {
         self.pid
     }
 }
 
 /// An owned permission to join on a thread (block on its termination).
-pub struct JoinHandle<S: ThreadSupport, T> {
-    thread: Thread<S>,
+pub struct JoinHandle<T> {
+    thread: Thread,
     mark: PhantomData<T>,
 }
 
-impl<S: ThreadSupport, T> JoinHandle<S, T> {
-    /*
-    **  @brief  Extracts a handle to the underlying thread.
-    **  @param  none
-    **  @retval the thread of the handle
-    */
-    pub fn thread(&self) -> &Thread<S> {
+impl<T> JoinHandle<T> {
+    /// Extracts a handle to the underlying thread.
+    pub fn thread(&self) -> &Thread {
         &self.thread
     }
-    /*
-    **  @brief  Waits for the associated thread to finish.
-    **  @param  none
-    **  @retval Result<T, ()>  the result of the associated thread
-    */
+    /// Waits for the associated thread to finish.
     pub fn join(self) -> Result<T, ()> {
-        let mut processor = S::processor();
-        match processor.current_wait_for(self.thread.pid) {
-            WaitResult::Ok(_, exit_code) => unsafe {
-                Ok(*Box::from_raw(exit_code as *mut T))
-            }
-            WaitResult::NotExist => Err(()),
-        }
-    }
-}
+        loop {
+            match processor().manager().get_status(self.thread.pid) {
+                Some(Status::Exited(exit_code)) => {
+                    processor().manager().remove(self.thread.pid);
+                    // Find return value on the heap from the exit code.
+                    return Ok(unsafe { *Box::from_raw(exit_code as *mut T) });
+                }
+                None => return Err(()),
+                _ => {}
+            }
+            processor().manager().wait(current().id(), self.thread.pid);
+            processor().yield_now();
+        }
+    }
+}
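`sleep` above converts a `Duration` to timer ticks assuming a 100 Hz tick (10 ms per tick): `secs * 100 + nanos / 10_000_000`. A host-runnable check of that same conversion:

use std::time::Duration;

fn dur_to_ticks(dur: Duration) -> usize {
    dur.as_secs() as usize * 100 + dur.subsec_nanos() as usize / 10_000_000
}

fn main() {
    assert_eq!(dur_to_ticks(Duration::from_secs(1)), 100);
    assert_eq!(dur_to_ticks(Duration::from_millis(2500)), 250);
    assert_eq!(dur_to_ticks(Duration::from_millis(5)), 0); // below one tick
    println!("1 s = {} ticks", dur_to_ticks(Duration::from_secs(1)));
}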

kernel/Cargo.lock (generated)

@@ -244,6 +244,7 @@ name = "ucore-process"
 version = "0.1.0"
 dependencies = [
  "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "spin 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]

@@ -1,14 +1,19 @@
     .section .text.entry
     .globl _start
 _start:
-    lui sp, %hi(bootstacktop)
-    addi sp, sp, %lo(bootstacktop)
+    add t0, a0, 1
+    slli t0, t0, 16
+    lui sp, %hi(bootstack)
+    addi sp, sp, %lo(bootstack)
+    add sp, sp, t0
     call rust_main
 
     .section .bss
     .align 12  #PGSHIFT
     .global bootstack
 bootstack:
-    .space 4096 * 16  #KSTACKSIZE
+    .space 4096 * 16 * 8
     .global bootstacktop
 bootstacktop:
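The new `_start` gives each hart its own 64 KiB stack carved from a single `.bss` array: `sp = bootstack + (hartid + 1) << 16`, with the array grown to `4096 * 16 * 8` bytes for up to 8 harts. A host-runnable sketch of that arithmetic (the `bootstack` address below is an arbitrary example, not the real link address):

const STACK_SIZE_PER_HART: usize = 1 << 16;   // slli t0, t0, 16
const BOOTSTACK_BYTES: usize = 4096 * 16 * 8; // the .space directive

fn stack_top(bootstack: usize, hartid: usize) -> usize {
    bootstack + (hartid + 1) * STACK_SIZE_PER_HART // add t0, a0, 1
}

fn main() {
    let bootstack = 0x8040_0000; // example placement only
    for hartid in 0..8 {
        println!("hart {}: sp = {:#x}", hartid, stack_top(bootstack, hartid));
    }
    // hart 7's stack top coincides with the end of the reserved region.
    assert_eq!(stack_top(bootstack, 7), bootstack + BOOTSTACK_BYTES);
}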

@@ -13,7 +13,8 @@ _save_context:
     # save x registers except x2 (sp)
     sw x1, 1*4(sp)
     sw x3, 3*4(sp)
-    sw x4, 4*4(sp)
+    # tp(x4) = hartid. DON'T change.
+    # sw x4, 4*4(sp)
     sw x5, 5*4(sp)
     sw x6, 6*4(sp)
     sw x7, 7*4(sp)
@@ -61,7 +62,7 @@ _save_context:
     lw s1, 32*4(sp)             # s1 = sstatus
     lw s2, 33*4(sp)             # s2 = sepc
     andi s0, s1, 1 << 8
-    bnez s0, _restore_context   # back to U-mode? (sstatus.SPP = 1)
+    bnez s0, _restore_context   # back to S-mode? (sstatus.SPP = 1)
 _save_kernel_sp:
     addi s0, sp, 36*4
     csrw 0x140, s0              # sscratch = kernel-sp
@@ -73,7 +74,7 @@ _restore_context:
     # restore x registers except x2 (sp)
     lw x1, 1*4(sp)
     lw x3, 3*4(sp)
-    lw x4, 4*4(sp)
+    # lw x4, 4*4(sp)
     lw x5, 5*4(sp)
     lw x6, 6*4(sp)
     lw x7, 7*4(sp)

@@ -0,0 +1,14 @@
+// Physical address available on THINPAD:
+// [0x80000000, 0x80800000]
+const P2_SIZE: usize = 1 << 22;
+const P2_MASK: usize = 0x3ff << 22;
+pub const RECURSIVE_INDEX: usize = 0x3fe;
+pub const KERNEL_OFFSET: usize = 0;
+pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;
+pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
+pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
+pub const MEMORY_OFFSET: usize = 0x8000_0000;
+pub const MEMORY_END: usize = 0x8080_0000;
+pub const USER_STACK_OFFSET: usize = 0x70000000;
+pub const USER_STACK_SIZE: usize = 0x10000;
+pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;

@@ -1,6 +1,6 @@
 use super::super::riscv::register::*;
 
-#[derive(Debug, Clone)]
+#[derive(Clone)]
 #[repr(C)]
 pub struct TrapFrame {
     pub x: [usize; 32], // general registers
@@ -53,9 +53,8 @@ impl TrapFrame {
         tf.x[2] = sp;
         tf.sepc = entry_addr;
         tf.sstatus = sstatus::read();
-        // Supervisor Previous Interrupt Disable ?
-        tf.sstatus.set_spie(false); // Enable interrupt
-        // Supervisor Previous Privilege Mode is User
+        tf.sstatus.set_spie(true);
+        tf.sstatus.set_sie(false);
         tf.sstatus.set_spp(sstatus::SPP::User);
         tf
     }
@@ -65,6 +64,29 @@ impl TrapFrame {
     }
 }
 
+use core::fmt::{Debug, Formatter, Error};
+impl Debug for TrapFrame {
+    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
+        struct Regs<'a>(&'a [usize; 32]);
+        impl<'a> Debug for Regs<'a> {
+            fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
+                const REG_NAME: [&str; 32] = [
+                    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
+                    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+                    "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
+                    "t3", "t4", "t5", "t6"];
+                f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish()
+            }
+        }
+        f.debug_struct("TrapFrame")
+            .field("regs", &Regs(&self.x))
+            .field("sstatus", &self.sstatus)
+            .field("sepc", &self.sepc)
+            .field("sbadaddr", &self.sbadaddr)
+            .field("scause", &self.scause)
+            .finish()
+    }
+}
+
 /// kernel stack contents for a new thread
 #[derive(Debug)]
 #[repr(C)]

@@ -0,0 +1,36 @@
+use consts::MAX_CPU_NUM;
+use core::ptr::{read_volatile, write_volatile};
+use memory::*;
+
+static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
+
+pub unsafe fn set_cpu_id(cpu_id: usize) {
+    asm!("mv tp, $0" : : "r"(cpu_id));
+}
+
+pub fn id() -> usize {
+    let cpu_id;
+    unsafe { asm!("mv $0, tp" : "=r"(cpu_id)); }
+    cpu_id
+}
+
+pub fn send_ipi(cpu_id: usize) {
+    super::bbl::sbi::send_ipi(1 << cpu_id);
+}
+
+pub unsafe fn has_started(cpu_id: usize) -> bool {
+    read_volatile(&STARTED[cpu_id])
+}
+
+pub unsafe fn start_others(hart_mask: usize) {
+    for cpu_id in 0..MAX_CPU_NUM {
+        if (hart_mask >> cpu_id) & 1 != 0 {
+            write_volatile(&mut STARTED[cpu_id], true);
+        }
+    }
+}
+
+pub fn halt() {
+    use super::riscv::asm::wfi;
+    unsafe { wfi() }
+}
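`start_others`/`has_started` above form a one-shot release gate: secondary harts spin on their slot of `STARTED` until the boot hart flips it via a volatile write. A host-runnable analogue of the same hand-off, substituting `AtomicBool` for the kernel's volatile accesses over a static array:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

const MAX_CPU_NUM: usize = 4;

fn main() {
    let started: Arc<Vec<AtomicBool>> =
        Arc::new((0..MAX_CPU_NUM).map(|_| AtomicBool::new(false)).collect());

    // "Secondary harts" 1..4 spin until released, like the loop in rust_main.
    let workers: Vec<_> = (1..MAX_CPU_NUM).map(|id| {
        let started = started.clone();
        thread::spawn(move || {
            while !started[id].load(Ordering::Acquire) {} // has_started(id)
            println!("hart {} released", id);
        })
    }).collect();

    // "Boot hart" flips each flag whose bit is set in the hart mask.
    let hart_mask = 0b1110;
    for id in 0..MAX_CPU_NUM {
        if (hart_mask >> id) & 1 != 0 {
            started[id].store(true, Ordering::Release); // start_others
        }
    }
    for w in workers { w.join().unwrap(); }
}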

@@ -20,6 +20,8 @@ pub fn init() {
         sscratch::write(0);
         // Set the exception vector address
         stvec::write(__alltraps as usize, stvec::TrapMode::Direct);
+        // Enable IPI
+        sie::set_ssoft();
     }
     info!("interrupt: init end");
 }
@@ -78,10 +80,13 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
         Trap::Exception(E::InstructionPageFault) => page_fault(tf),
         _ => ::trap::error(tf),
     }
-    ::trap::before_return();
     trace!("Interrupt end");
 }
 
+fn ipi() {
+    debug!("IPI");
+    super::bbl::sbi::clear_ipi();
+}
+
 /*
 * @brief:
 *   process timer interrupt

@@ -7,19 +7,35 @@ pub mod timer;
 pub mod paging;
 pub mod memory;
 pub mod compiler_rt;
+pub mod consts;
+pub mod cpu;
 
 #[no_mangle]
-pub extern fn rust_main() -> ! {
-    println!("Hello RISCV! {}", 123);
-    // First init log mod, so that we can print log info.
+pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! {
+    unsafe { cpu::set_cpu_id(hartid); }
+    println!("Hello RISCV! in hart {}, {}, {}", hartid, dtb, hart_mask);
+    if hartid != 0 {
+        while unsafe { !cpu::has_started(hartid) } { }
+        others_main();
+        unreachable!();
+    }
     ::logging::init();
-    // Init interrupt handling.
     interrupt::init();
-    // Init physical memory management and heap
     memory::init();
-    // Init timer interrupt
     timer::init();
+    ::process::init();
+    ::thread::spawn(::fs::shell);
+    unsafe { cpu::start_others(hart_mask); }
+    ::kmain();
+}
+
+fn others_main() -> ! {
+    interrupt::init();
+    timer::init();
     ::kmain();
 }

@@ -1,4 +1,4 @@
-use consts::{KERNEL_PML4, RECURSIVE_PAGE_PML4};
+use consts::{KERNEL_P2_INDEX, RECURSIVE_INDEX};
 // Depends on kernel
 use memory::{active_table, alloc_frame, alloc_stack, dealloc_frame};
 use super::riscv::addr::*;
@@ -20,14 +20,14 @@ use ucore_memory::paging::*;
 pub fn setup_page_table(frame: Frame) {
     let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut RvPageTable) };
     p2.zero();
-    p2.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
+    p2.set_recursive(RECURSIVE_INDEX, frame.clone());
 
     // Set kernel identity map
     // 0x10000000 ~ 1K area
     p2.map_identity(0x40, EF::VALID | EF::READABLE | EF::WRITABLE);
     // 0x80000000 ~ 8K area
-    p2.map_identity(KERNEL_PML4, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
-    p2.map_identity(KERNEL_PML4 + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
+    p2.map_identity(KERNEL_P2_INDEX, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
+    p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
 
     use super::riscv::register::satp;
     unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
@@ -88,7 +88,7 @@ impl PageTable for ActivePageTable {
         let page = Page::of_addr(VirtAddr::new(addr));
         // ???
         let _ = self.0.translate_page(page);
-        let entry_addr = ((addr >> 10) & 0x003ffffc) | (RECURSIVE_PAGE_PML4 << 22);
+        let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22);
         unsafe { &mut *(entry_addr as *mut PageEntry) }
     }
@@ -130,7 +130,7 @@ impl PageTable for ActivePageTable {
 // define the ROOT_PAGE_TABLE, and the virtual address of it?
 const ROOT_PAGE_TABLE: *mut RvPageTable =
-    (((RECURSIVE_PAGE_PML4 << 10) | (RECURSIVE_PAGE_PML4 + 1)) << 12) as *mut RvPageTable;
+    (((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
 
 impl ActivePageTable {
     pub unsafe fn new() -> Self {
@@ -189,7 +189,6 @@ impl Entry for PageEntry {
         flags.set(EF::RESERVED2, !writable);
     }
     fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); }
-    // valid property must be 0 used when swapped
     fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
     fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::RESERVED1, value); }
     fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
@@ -235,7 +234,7 @@ impl InactivePageTable for InactivePageTable0 {
             .expect("failed to allocate frame");
         active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
             table.zero();
-            table.set_recursive(RECURSIVE_PAGE_PML4, frame.clone());
+            table.set_recursive(RECURSIVE_INDEX, frame.clone());
         });
         InactivePageTable0 { p2_frame: frame }
     }
@@ -248,17 +247,17 @@ impl InactivePageTable for InactivePageTable0 {
     */
     fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
         active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
-            let backup = p2_table[RECURSIVE_PAGE_PML4].clone();
+            let backup = p2_table[RECURSIVE_INDEX].clone();
 
             // overwrite recursive mapping
-            p2_table[RECURSIVE_PAGE_PML4].set(self.p2_frame.clone(), EF::VALID);
+            p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
             sfence_vma_all();
 
             // execute f in the new context
             f(active_table);
 
-            // restore recursive mapping to original p4 table
-            p2_table[RECURSIVE_PAGE_PML4] = backup;
+            // restore recursive mapping to original p2 table
+            p2_table[RECURSIVE_INDEX] = backup;
             sfence_vma_all();
         });
     }
@@ -355,12 +354,12 @@ impl InactivePageTable0 {
     fn map_kernel(&mut self) {
         let table = unsafe { &mut *ROOT_PAGE_TABLE };
         let e0 = table[0x40];
-        let e1 = table[KERNEL_PML4];
+        let e1 = table[KERNEL_P2_INDEX];
         assert!(!e1.is_unused());
 
         self.edit(|_| {
             table[0x40] = e0;
-            table[KERNEL_PML4].set(e1.frame(), EF::VALID | EF::GLOBAL);
+            table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
         });
     }
 }
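The rewritten `entry_addr` expression relies on the Sv32 recursive-mapping trick: with the page directory mapped into itself at slot `RECURSIVE_INDEX`, the PTE for any virtual address appears at a fixed virtual address. A host-runnable sketch of just that address computation:

const RECURSIVE_INDEX: usize = 0x3fe;

fn pte_vaddr(addr: usize) -> usize {
    // Top 10 bits select the recursive slot; the next 20 bits are the
    // original P2+P1 indices; the low 2 bits are cleared for 4-byte PTEs.
    ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22)
}

fn main() {
    let addr = 0x8020_1000;
    println!("PTE of {:#x} is visible at {:#x}", addr, pte_vaddr(addr));
    // The resulting address always falls inside the recursive region.
    assert_eq!(pte_vaddr(addr) >> 22, RECURSIVE_INDEX);
}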

@@ -0,0 +1,97 @@
+// Copy from Redox consts.rs:
+
+// Because the memory map is so important to not be aliased, it is defined here, in one place
+// The lower 256 PML4 entries are reserved for userspace
+// Each PML4 entry references up to 512 GB of memory
+// The top (511) PML4 is reserved for recursive mapping
+// The second from the top (510) PML4 is reserved for the kernel
+
+/// The size of a single PML4
+pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
+pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
+
+/// Offset of recursive paging
+pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
+pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE;
+pub const KERNEL_SIZE: usize = PML4_SIZE;
+
+/// Offset to kernel heap
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of kernel heap
+pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
+
+pub const MEMORY_OFFSET: usize = 0;
+
+/// Offset to kernel percpu variables
+//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
+pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
+/// Size of kernel percpu variables
+pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
+
+/// Offset to user image
+pub const USER_OFFSET: usize = 0;
+pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user TCB
+pub const USER_TCB_OFFSET: usize = 0xB000_0000;
+
+/// Offset to user arguments
+pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2;
+
+/// Offset to user heap
+pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
+pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user grants
+pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
+pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user stack
+pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
+pub const USER32_STACK_OFFSET: usize = 0xB000_0000;
+pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of user stack
+pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
+
+/// Offset to user sigstack
+pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
+pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of user sigstack
+pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
+
+/// Offset to user TLS
+pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
+pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary image (used when cloning)
+pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
+pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary heap (used when cloning)
+pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
+pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary page for grants
+pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
+pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary stack (used when cloning)
+pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
+pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary sigstack (used when cloning)
+pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
+pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary tls (used when cloning)
+pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
+pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset for usage in other temporary pages
+pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
+pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE;

@@ -1,3 +1,6 @@
+use super::apic::{LocalApic, XApic};
+use super::raw_cpuid::CpuId;
+
 /// Exit qemu
 /// See: https://wiki.osdev.org/Shutdown
 /// Must run qemu with `-device isa-debug-exit`
@@ -8,3 +11,22 @@ pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
     Port::new(0x501).write((error_code - 1) / 2);
     unreachable!()
 }
+
+pub fn id() -> usize {
+    CpuId::new().get_feature_info().unwrap().initial_local_apic_id() as usize
+}
+
+pub fn send_ipi(cpu_id: usize) {
+    let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
+    unsafe { lapic.send_ipi(cpu_id as u8, 0x30); } // TODO: Find a IPI trap num
+}
+
+pub fn init() {
+    let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
+    lapic.cpu_init();
+}
+
+pub fn halt() {
+    use x86_64::instructions::hlt;
+    hlt();
+}

@ -5,267 +5,120 @@
use spin::Mutex; use spin::Mutex;
lazy_static! { lazy_static! {
pub static ref DISK0: LockedIde = LockedIde(Mutex::new(DmaController::new(0))); pub static ref DISK0: LockedIde = LockedIde(Mutex::new(IDE::new(0)));
pub static ref DISK1: LockedIde = LockedIde(Mutex::new(DmaController::new(1))); pub static ref DISK1: LockedIde = LockedIde(Mutex::new(IDE::new(1)));
} }
pub const BLOCK_SIZE: usize = 512; pub const BLOCK_SIZE: usize = 512;
pub struct LockedIde(pub Mutex<DmaController>); pub struct LockedIde(pub Mutex<IDE>);
pub struct DmaController { pub struct IDE {
num: u8, num: u8,
/// I/O Base
base: u16,
/// Control Base
ctrl: u16,
} }
impl DmaController impl IDE {
{ pub fn new(num: u8) -> Self {
/// Read ATA DMA. Block size = 512 bytes. let ide = match num {
pub fn read(&self, blockidx: u64, count: usize, dst: &mut [u32]) -> Result<usize, ()> { 0 => IDE { num: 0, base: 0x1f0, ctrl: 0x3f4 },
assert_eq!(dst.len(), count * SECTOR_SIZE); 1 => IDE { num: 1, base: 0x1f0, ctrl: 0x3f4 },
let dst = if count > MAX_DMA_SECTORS { &mut dst[..MAX_DMA_SECTORS * SECTOR_SIZE] } else { dst }; 2 => IDE { num: 2, base: 0x170, ctrl: 0x374 },
//self.do_dma(blockidx, DMABuffer::new_mut(dst, 32), disk, false); 3 => IDE { num: 3, base: 0x170, ctrl: 0x374 },
self.ide_read_secs(self.num, blockidx, dst, count as u8) _ => panic!("ide number should be 0,1,2,3"),
} };
/// Write ATA DMA. Block size = 512 bytes. ide.init();
pub fn write(&self, blockidx: u64, count: usize, dst: &[u32]) -> Result<usize, ()> {
assert_eq!(dst.len(), count * SECTOR_SIZE);
let dst = if count > MAX_DMA_SECTORS { &dst[..MAX_DMA_SECTORS * SECTOR_SIZE] } else { dst };
//println!("ide_write_secs: disk={},blockidx={},count={}",disk,blockidx,count);
self.ide_write_secs(self.num, blockidx, dst, count as u8)
}
/// Create structure and init
fn new(num: u8) -> Self {
assert!(num < MAX_IDE as u8);
let ide = DmaController { num };
ide.ide_init();
ide ide
} }
fn ide_wait_ready(&self, iobase: u16, check_error: usize) -> usize { /// Read ATA DMA. Block size = 512 bytes.
pub fn read(&self, sector: u64, count: usize, data: &mut [u32]) -> Result<(), ()> {
assert_eq!(data.len(), count * SECTOR_SIZE);
self.wait();
unsafe { unsafe {
let mut r = port::inb(iobase + ISA_STATUS); self.select(sector, count as u8);
//println!("iobase:{} ready:{}",iobase,r); port::outb(self.base + ISA_COMMAND, IDE_CMD_READ);
while (r & IDE_BSY) > 0 { for i in 0..count {
r = port::inb(iobase + ISA_STATUS); let ptr = &data[(i as usize) * SECTOR_SIZE];
//println!("busy"); if self.wait_error() {
return Err(());
} }
/* nothing */ asm!("rep insl" :: "{dx}"(self.base), "{rdi}"(ptr), "{cx}"(SECTOR_SIZE) : "rdi" : "volatile");
if check_error == 1 && (r & (IDE_DF | IDE_ERR)) != 0 {
return 1;
} }
} }
return 0; Ok(())
} }
/// Write ATA DMA. Block size = 512 bytes.
fn ide_init(&self) { pub fn write(&self, sector: u64, count: usize, data: &[u32]) -> Result<(), ()> {
//static_assert((SECTSIZE % 4) == 0); assert_eq!(data.len(), count * SECTOR_SIZE);
let ideno = self.num; self.wait();
//println!("ideno:{}",ideno);
/* assume that no device here */
//ide_devices[ideno].valid = 0;
//let iobase = IO_BASE(ideno);
        unsafe {
            self.select(sector, count as u8);
            port::outb(self.base + ISA_COMMAND, IDE_CMD_WRITE);
            for i in 0..count {
                let ptr = &data[(i as usize) * SECTOR_SIZE];
                if self.wait_error() {
                    return Err(());
                }
                asm!("rep outsl" :: "{dx}"(self.base), "{rsi}"(ptr), "{cx}"(SECTOR_SIZE) : "rsi" : "volatile");
            }
        }
        Ok(())
    }

    fn wait(&self) {
        while unsafe { port::inb(self.base + ISA_STATUS) } & IDE_BUSY != 0 {}
    }

    fn wait_error(&self) -> bool {
        self.wait();
        let status = unsafe { port::inb(self.base + ISA_STATUS) };
        status & (IDE_DF | IDE_ERR) != 0
    }

    fn init(&self) {
        self.wait();
        unsafe {
            // step1: select drive
            port::outb(self.base + ISA_SDH, (0xE0 | ((self.num & 1) << 4)) as u8);
            self.wait();

            // step2: send ATA identify command
            port::outb(self.base + ISA_COMMAND, IDE_CMD_IDENTIFY);
            self.wait();

            // step3: polling
            if port::inb(self.base + ISA_STATUS) == 0 || self.wait_error() {
                return;
            }

            // read identification space of the device
            let mut data = [0; SECTOR_SIZE];
            asm!("rep insl" :: "{dx}"(self.base + ISA_DATA), "{rdi}"(data.as_ptr()), "{cx}"(SECTOR_SIZE) : "rdi" : "volatile");
        }
    }

    fn select(&self, sector: u64, count: u8) {
        assert_ne!(count, 0);
        self.wait();
        unsafe {
            // generate interrupt
            port::outb(self.ctrl + ISA_CTRL, 0);
            port::outb(self.base + ISA_SECCNT, count);
            port::outb(self.base + ISA_SECTOR, (sector & 0xFF) as u8);
            port::outb(self.base + ISA_CYL_LO, ((sector >> 8) & 0xFF) as u8);
            port::outb(self.base + ISA_CYL_HI, ((sector >> 16) & 0xFF) as u8);
            port::outb(self.base + ISA_SDH, 0xE0 | ((self.num & 1) << 4) | (((sector >> 24) & 0xF) as u8));
        }
    }
}
const SECTOR_SIZE: usize = 128;
const MAX_DMA_SECTORS: usize = 0x1F_F000 / SECTOR_SIZE; // Limited by sector count (and PRDT entries)
// 512 PRDT entries, assume maximum fragmentation = 512 * 4K max = 2^21 = 2MB per transfer
const ISA_DATA: u16 = 0x00;
const ISA_ERROR: u16 = 0x01;
const ISA_PRECOMP: u16 = 0x01;
@ -278,7 +131,7 @@ const ISA_SDH: u16 = 0x06;
const ISA_COMMAND: u16 = 0x07;
const ISA_STATUS: u16 = 0x07;

const IDE_BUSY: u8 = 0x80;
const IDE_DRDY: u8 = 0x40;
const IDE_DF: u8 = 0x20;
const IDE_DRQ: u8 = 0x08;
@ -288,33 +141,7 @@ const IDE_CMD_READ: u8 = 0x20;
const IDE_CMD_WRITE: u8 = 0x30;
const IDE_CMD_IDENTIFY: u8 = 0xEC;

const MAX_NSECS: usize = 128;
mod port {
    use x86_64::instructions::port::Port;

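The `rep insl`/`rep outsl` string instructions in the driver above transfer SECTOR_SIZE u32 words per sector in a single instruction. For illustration, a minimal sketch of the same read done word-by-word with the x86_64 crate's Port type (the helper name read_sector_pio is hypothetical, not part of this driver):

    use x86_64::instructions::port::Port;

    const SECTOR_SIZE: usize = 128; // sector size in u32 words, as above

    /// Hypothetical helper: the equivalent of `rep insl`, one `in` per word.
    unsafe fn read_sector_pio(data_port: u16, buf: &mut [u32; SECTOR_SIZE]) {
        let mut port: Port<u32> = Port::new(data_port);
        for word in buf.iter_mut() {
            *word = port.read(); // one `in eax, dx` per u32
        }
    }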
@ -1,7 +1,6 @@
extern crate syscall as redox_syscall;
pub mod vga;
pub mod serial;
pub mod pic;
pub mod keyboard;
@ -11,13 +10,12 @@ pub mod ide;

pub fn init() {
    assert_has_not_been_called!();
    // Use IOAPIC instead of PIC
    pic::disable();
    // Use APIC Timer instead of PIT
    // pit::init();
    serial::init();
    keyboard::init();
}

@ -1,5 +1,4 @@
use alloc::boxed::Box;
use consts::MAX_CPU_NUM;
use core::fmt;
use core::fmt::Debug;
@ -50,7 +49,7 @@ pub fn init() {
        load_tss(TSS_SELECTOR);
    }

    CPUS[super::cpu::id() as usize].call_once(||
        Mutex::new(Cpu { gdt, tss: unsafe { &mut *tss } }));
}
@ -67,7 +66,7 @@ pub struct Cpu {
impl Cpu {
    pub fn current() -> MutexGuard<'static, Cpu> {
        CPUS[super::cpu::id()].try().unwrap().lock()
    }
    /// Set the stack address that is switched to automatically when trapping from Ring3 to Ring0

@ -72,7 +72,7 @@ global_asm!(include_str!("vector.asm"));

#[no_mangle]
pub extern fn rust_trap(tf: &mut TrapFrame) {
    trace!("Interrupt: {:#x} @ CPU{}", tf.trap_num, super::super::cpu::id());
    // Dispatch
    match tf.trap_num as u8 {
        T_BRKPT => breakpoint(),
@ -88,11 +88,7 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
                IRQ_IDE => ide(),
                _ => panic!("Invalid IRQ number: {}", irq),
            }
            super::ack(irq);
        }
        T_SWITCH_TOK => to_kernel(tf),
        T_SWITCH_TOU => to_user(tf),
@ -101,7 +97,6 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
        T_DIVIDE | T_GPFLT | T_ILLOP => error(tf),
        _ => panic!("Unhandled interrupt {:x}", tf.trap_num),
    }
}

fn breakpoint() {

@ -1,5 +1,4 @@
use x86_64;

pub mod consts;
mod handler;
@ -7,6 +6,8 @@ mod trapframe;
pub use self::trapframe::*;
pub use self::handler::*;
use super::apic::*;
use consts::KERNEL_OFFSET;

#[inline(always)]
pub unsafe fn enable() {
@ -39,9 +40,12 @@ pub fn no_interrupt(f: impl FnOnce()) {

#[inline(always)]
pub fn enable_irq(irq: u8) {
    let mut ioapic = unsafe { IoApic::new(KERNEL_OFFSET + IOAPIC_ADDR as usize) };
    ioapic.enable(irq, 0);
}

#[inline(always)]
pub fn ack(irq: u8) {
    let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) };
    lapic.eoi();
}

@ -1,14 +1,15 @@
use bit_allocator::BitAlloc;
use consts::KERNEL_OFFSET;
// Depends on kernel
use memory::{FRAME_ALLOCATOR, init_heap, active_table};
use super::{BootInfo, MemoryRegionType};
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;

pub fn init(boot_info: &BootInfo) {
    assert_has_not_been_called!("memory::init must be called only once");
    init_frame_allocator(boot_info);
    init_device_vm_map();
    init_heap();
    info!("memory: init end");
}
@ -22,3 +23,11 @@ fn init_frame_allocator(boot_info: &BootInfo) {
        }
    }
}

fn init_device_vm_map() {
    let mut page_table = active_table();
    // IOAPIC
    page_table.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000).update();
    // LocalAPIC
    page_table.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000).update();
}

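Both calls in init_device_vm_map follow one pattern: map a single MMIO page at virt = KERNEL_OFFSET + phys. A hedged sketch of the generalization, assuming the same ucore_memory PageTable map(virt, phys)/.update() chain used in that file (the helper name and device list are illustrative only):

    // Illustrative only: same interface as init_device_vm_map() above.
    fn map_devices(page_table: &mut ActivePageTable, device_phys_addrs: &[usize]) {
        for &phys in device_phys_addrs {
            // one page per device register block, virt = phys + KERNEL_OFFSET
            page_table.map(KERNEL_OFFSET + phys, phys).update();
        }
    }

    // usage mirroring init_device_vm_map():
    // map_devices(&mut active_table(), &[0xfec00000, 0xfee00000]);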
@ -1,6 +1,10 @@
extern crate bootloader;
extern crate apic;
extern crate raw_cpuid;

use self::bootloader::bootinfo::{BootInfo, MemoryRegionType};
use core::sync::atomic::*;
use consts::KERNEL_OFFSET;

pub mod driver;
pub mod cpu;
@ -8,14 +12,23 @@ pub mod interrupt;
pub mod paging;
pub mod gdt;
pub mod idt;
// TODO: Move multi-core init to bootloader
//pub mod smp;
pub mod memory;
pub mod io;
pub mod consts;

static AP_CAN_INIT: AtomicBool = ATOMIC_BOOL_INIT;

/// The entry point of kernel
#[no_mangle] // don't mangle the name of this function
pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
    let cpu_id = cpu::id();
    println!("Hello world! from CPU {}!", cpu_id);
    if cpu_id != 0 {
        while !AP_CAN_INIT.load(Ordering::Relaxed) {}
        other_start();
    }
    // First init log mod, so that we can print log info.
    ::logging::init();
    info!("Hello world!");
@ -30,20 +43,22 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
    // Now heap is available
    gdt::init();
    cpu::init();
    driver::init();
    ::process::init();
    ::thread::spawn(::fs::shell);
    AP_CAN_INIT.store(true, Ordering::Relaxed);
    ::kmain();
}

/// The entry point for other processors
fn other_start() -> ! {
    idt::init();
    gdt::init();
    cpu::init();
    ::kmain();
}

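The AP_CAN_INIT flag above gates the application processors until the BSP has finished global initialization. A user-space sketch of the same handshake, with std threads standing in for CPUs (acquire/release ordering shown for clarity; the kernel code uses Relaxed):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;

    static CAN_INIT: AtomicBool = AtomicBool::new(false);

    fn main() {
        let ap = thread::spawn(|| {
            // An AP spins until the BSP releases it, then runs its own init.
            while !CAN_INIT.load(Ordering::Acquire) {}
            println!("AP: proceeding with init");
        });
        // BSP: finish global init first, then release the APs.
        CAN_INIT.store(true, Ordering::Release);
        ap.join().unwrap();
    }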
@ -1,6 +1,6 @@
use bit_allocator::{BitAlloc, BitAlloc64K};
// Depends on kernel
use memory::{active_table, alloc_frame, dealloc_frame};
use spin::{Mutex, MutexGuard};
use ucore_memory::cow::CowExt;
use ucore_memory::memory_set::*;
@ -49,7 +49,7 @@ impl PageTable for ActivePageTable {
        let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
        self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
            .unwrap().flush();
        unsafe { &mut *(get_entry_ptr(addr, 1)) }
    }

    fn unmap(&mut self, addr: usize) {
@ -57,9 +57,12 @@
        flush.flush();
    }

    fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
        for level in 0..3 {
            let entry = get_entry_ptr(addr, 4 - level);
            if unsafe { !(*entry).present() } { return None; }
        }
        unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
    }

    fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
@ -140,6 +143,12 @@ impl Entry for PageEntry {
    fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
}

fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
    debug_assert!(level <= 4);
    let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1);
    entry_addr as *mut PageEntry
}

impl PageEntry {
    fn as_flags(&mut self) -> &mut EF {
        unsafe { &mut *(self as *mut _ as *mut EF) }
@ -222,10 +231,6 @@ impl InactivePageTable for InactivePageTable0 {
    fn dealloc_frame(target: usize) {
        dealloc_frame(target)
    }
}

impl InactivePageTable0 {

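get_entry_ptr relies on the recursive page-table mapping: shifting a virtual address right by 9 bits per level turns it into the address of its own page-table entry inside the recursive region at the top of the 48-bit canonical space. A worked sketch of the arithmetic (the address is illustrative):

    fn main() {
        let addr: usize = 0xdead_b000;
        let level = 1u8; // P1 entry, as returned by map()
        // addr >> 9 drops one level of indices, so the P1 index becomes
        // the byte offset; & !0x7 aligns to an 8-byte entry; the OR forces
        // the top bits into the recursive region (0xFFFF_FF80_0000_0000..).
        let entry = ((addr >> (level * 9)) & !0x7) | !((1usize << (48 - level * 9)) - 1);
        println!("P1 entry for {:#x} lives at {:#x}", addr, entry);
    }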
@ -1,133 +1,6 @@
#![allow(dead_code)]

pub use arch::consts::*;

pub const MAX_CPU_NUM: usize = 8;
pub const MAX_PROCESS_NUM: usize = 48;

@ -52,12 +52,12 @@ pub fn shell() {
        if let Ok(file) = root.borrow().lookup(name.as_str()) {
            use process::*;
            let len = file.borrow().read_at(0, &mut *buf).unwrap();
            let context = ContextImpl::new_user(&buf[..len]);
            memory_set_map_swappable(context.get_memory_set_mut());
            let pid = processor().manager().add(context);
            //memory_set_map_swappable(processor().get_context_mut(pid).get_memory_set_mut());
            processor().manager().wait(thread::current().id(), pid);
            processor().yield_now();
        } else {
            println!("Program not exist");
        }

@ -13,7 +13,8 @@ pub fn panic(info: &PanicInfo) -> ! {
    let location = info.location().unwrap();
    let message = info.message().unwrap();
    error!("\n\nPANIC in {} at line {}\n    {}", location.file(), location.line(), message);
    use arch::cpu::halt;
    loop { halt() }
}

#[lang = "oom"]

@ -9,6 +9,8 @@
#![feature(panic_info_message)]
#![feature(global_asm)]
#![feature(compiler_builtins_lib)]
#![feature(raw)]
#![feature(vec_resize_default)]
#![no_std]
@ -34,6 +36,8 @@ extern crate volatile;
extern crate x86_64;
extern crate xmas_elf;

pub use process::{processor, new_kernel_context};
use ucore_process::thread;
use linked_list_allocator::LockedHeap;

#[macro_use]    // print!
@ -45,8 +49,6 @@ mod consts;
mod process;
mod syscall;
mod fs;
mod sync;
mod trap;
mod console;
@ -61,22 +63,13 @@
pub mod arch;

pub fn kmain() -> ! {
    process::processor().run();

    // thread::test::local_key();
    // thread::test::unpack();
    // sync::test::philosopher_using_mutex();
    // sync::test::philosopher_using_monitor();
    // sync::mpsc::test::test_all();
}

/// Global heap allocator

@ -7,10 +7,14 @@ use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack, InactivePageTable};
use ucore_memory::swap::*;
<<<<<<< HEAD
use process::{processor, PROCESSOR};
use sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
use alloc::collections::VecDeque;
=======
use process::{processor, process};
>>>>>>> 87506b000dc9a8f08c0040fee9570f5913bdd5b8

pub type MemorySet = MemorySet_<InactivePageTable0>;
@ -84,6 +88,26 @@ pub fn alloc_stack() -> Stack {
    Stack { top, bottom }
}

pub struct KernelStack(usize);
const STACK_SIZE: usize = 0x8000;

impl KernelStack {
    pub fn new() -> Self {
        use alloc::alloc::{alloc, Layout};
        let bottom = unsafe { alloc(Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()) } as usize;
        KernelStack(bottom)
    }
    pub fn top(&self) -> usize {
        self.0 + STACK_SIZE
    }
}

impl Drop for KernelStack {
    fn drop(&mut self) {
        use alloc::alloc::{dealloc, Layout};
        unsafe { dealloc(self.0 as _, Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap()); }
    }
}

/*
@ -97,6 +121,7 @@ pub fn alloc_stack() -> Stack {
pub fn page_fault_handler(addr: usize) -> bool {
    info!("start handling swap in/out page fault");
    unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
<<<<<<< HEAD
    unsafe { PROCESSOR.try().unwrap().force_unlock(); }

    let mut temp_proc = processor();
@ -133,6 +158,10 @@ pub fn page_fault_handler(addr: usize) -> bool {
    // Handle copy on write (not being used now)
    unsafe { ACTIVE_TABLE.force_unlock(); }
    if active_table().page_fault_handler(addr, || alloc_frame().unwrap()) {
=======
    let pt = process().get_memory_set_mut().get_page_table_mut();
    if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, || alloc_frame().unwrap()) {
>>>>>>> 87506b000dc9a8f08c0040fee9570f5913bdd5b8
        return true;
    }
    false

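KernelStack allocates with size equal to alignment (32 KiB here), so every stack's base address is a multiple of STACK_SIZE. A user-space sketch of the same Layout usage:

    use std::alloc::{alloc, dealloc, Layout};

    const STACK_SIZE: usize = 0x8000;

    fn main() {
        // Same call as KernelStack::new(): 32 KiB size, 32 KiB alignment.
        let layout = Layout::from_size_align(STACK_SIZE, STACK_SIZE).unwrap();
        let bottom = unsafe { alloc(layout) } as usize;
        assert_eq!(bottom % STACK_SIZE, 0); // base is STACK_SIZE-aligned
        println!("stack occupies [{:#x}, {:#x})", bottom, bottom + STACK_SIZE);
        unsafe { dealloc(bottom as *mut u8, layout) };
    }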
@ -1,55 +1,44 @@
use arch::interrupt::{TrapFrame, Context as ArchContext};
use memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type}};
use core::fmt::{Debug, Error, Formatter};
use ucore_process::Context;
use alloc::boxed::Box;
use ucore_memory::{Page};
use ::memory::{InactivePageTable0, memory_set_record};
use ucore_memory::memory_set::*;

pub struct ContextImpl {
    arch: ArchContext,
    memory_set: MemorySet,
    kstack: KernelStack,
}

impl Context for ContextImpl {
    unsafe fn switch_to(&mut self, target: &mut Context) {
        use core::mem::transmute;
        let (target, _): (&mut ContextImpl, *const ()) = transmute(target);
        self.arch.switch(&mut target.arch);
    }
}

impl ContextImpl {
    pub unsafe fn new_init() -> Box<Context> {
        Box::new(ContextImpl {
            arch: ArchContext::null(),
            memory_set: MemorySet::new(),
            kstack: KernelStack::new(),
        })
    }

    pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
        let memory_set = MemorySet::new();
        let kstack = KernelStack::new();
        Box::new(ContextImpl {
            arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
        })
    }

    /// Make a new user thread from ELF data
@ -61,8 +50,7 @@ impl Context {
     * @retval:
     *   the new user thread Context
     */
    pub fn new_user(data: &[u8]) -> Box<Context> {
        // Parse elf
        let elf = ElfFile::new(data).expect("failed to read elf");
        let is32 = match elf.header.pt2 {
@ -116,23 +104,29 @@
            });
        }

        let kstack = KernelStack::new();
        // map the memory set swappable
        //memory_set_map_swappable(&mut memory_set);
        //set the user Memory pages in the memory set swappable
        //memory_set_map_swappable(&mut memory_set);
        let id = memory_set_record().iter()
            .position(|x| x.clone() == mmset_ptr).unwrap();
        memory_set_record().remove(id);

        Box::new(ContextImpl {
            arch: unsafe {
                ArchContext::new_user_thread(
                    entry_addr, user_stack_top - 8, kstack.top(), is32, memory_set.token())
            },
            memory_set,
            kstack,
        })
    }

    /// Fork
    pub fn fork(&self, tf: &TrapFrame) -> Box<Context> {
        // Clone memory set, make a new page table
        let mut memory_set = self.memory_set.clone();
@ -156,14 +150,20 @@
            });
        }

        let kstack = KernelStack::new();
        // map the memory set swappable
        //memory_set_map_swappable(&mut memory_set);
        // remove the raw pointer for the memory set since it will
        let id = memory_set_record().iter()
            .position(|x| x.clone() == mmset_ptr).unwrap();
        memory_set_record().remove(id);

        Box::new(ContextImpl {
            arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
            memory_set,
            kstack,
        })
    }

    pub fn get_memory_set_mut(&mut self) -> &mut MemorySet {
@ -172,7 +172,7 @@
}

impl Drop for ContextImpl {
    fn drop(&mut self) {
        // remove the new memory set to the recorder (deprecated in the latest version)
        /*
@ -185,7 +185,7 @@
        */
        //set the user Memory pages in the memory set unswappable
        let Self { ref mut arch, ref mut memory_set, ref mut kstack } = self;
        let pt = {
            memory_set.get_page_table_mut() as *mut InactivePageTable0
        };
@ -202,7 +202,7 @@
    }
}

impl Debug for ContextImpl {
    fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
        write!(f, "{:x?}", self.arch)
    }
@ -225,7 +225,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> MemorySet {
    }
    let (virt_addr, mem_size, flags) = match ph {
        ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
        ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.mem_size as usize, ph.flags),
    };
    set.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(flags), ""));
@ -256,5 +256,6 @@ pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
        unsafe { active_table_swap().set_swappable(pt, addr); }
    }
}
info!("Finishing setting pages swappable");
}

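switch_to above (and process() in the next file) downcasts a trait object by transmuting the fat pointer into its (data, vtable) pair and keeping the data half. A user-space sketch of the trick; it leans on the then-current layout of trait-object references, which is not a stable guarantee, and the type names here are illustrative:

    use std::mem::transmute;

    trait Context {}
    struct ContextImpl { value: u32 }
    impl Context for ContextImpl {}

    fn main() {
        let boxed: Box<dyn Context> = Box::new(ContextImpl { value: 42 });
        let obj: &dyn Context = &*boxed;
        // A &dyn Context is a (data, vtable) pair; the first word is the
        // concrete &ContextImpl, which is exactly what the kernel recovers.
        let (data, _vtable): (&ContextImpl, *const ()) = unsafe { transmute(obj) };
        assert_eq!(data.value, 42);
    }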
@ -1,57 +1,94 @@
use spin::Mutex;
pub use self::context::ContextImpl;
pub use ucore_process::*;
use consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
use arch::cpu;
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use sync::Condvar;
use core::sync::atomic::*;

pub mod context;

pub fn init() {
    // NOTE: max_time_slice <= 5 to ensure 'priority' test pass
    let scheduler = Box::new(scheduler::RRScheduler::new(5));
    let manager = Arc::new(ProcessManager::new(scheduler, MAX_PROCESS_NUM));

    extern fn idle(_arg: usize) -> ! {
        loop { cpu::halt(); }
    }
    for i in 0..4 {
        manager.add(ContextImpl::new_kernel(idle, i));
    }

    unsafe {
        for cpu_id in 0..MAX_CPU_NUM {
            PROCESSORS[cpu_id].init(cpu_id, ContextImpl::new_init(), manager.clone());
        }
    }
    info!("process init end");
}

static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()];

/// Ugly solution for sys_wait(0) (wait for any child)
#[derive(Default)]
pub struct Process {
    parent: AtomicUsize,
    children: Mutex<Vec<usize>>,
    subproc_exit: Condvar, // notifies the parent when a child exits
}

impl Process {
    pub fn new_fork(pid: usize, parent: usize) {
        PROCESS[pid].parent.store(parent, Ordering::Relaxed);
        PROCESS[pid].subproc_exit._clear();
        PROCESS[parent].children.lock().push(pid);
    }
    pub fn proc_exit(pid: usize) {
        let parent = PROCESS[pid].parent.load(Ordering::Relaxed);
        PROCESS[parent].subproc_exit.notify_all();
    }
    pub fn wait_child() {
        Self::current().subproc_exit._wait();
    }
    pub fn get_children() -> Vec<usize> {
        Self::current().children.lock().clone()
    }
    pub fn do_wait(pid: usize) {
        Self::current().children.lock().retain(|&p| p != pid);
    }
    fn current() -> &'static Self {
        &PROCESS[thread::current().id()]
    }
}

lazy_static! {
    pub static ref PROCESS: Vec<Process> = {
        let mut vec = Vec::new();
        vec.resize_default(MAX_PROCESS_NUM);
        vec
    };
}

/// Get the current process's context struct
pub fn process() -> &'static mut ContextImpl {
    use core::mem::transmute;
    let (process, _): (&mut ContextImpl, *const ()) = unsafe {
        transmute(processor().context())
    };
    process
}

// Implement dependencies for std::thread

#[no_mangle]
pub fn processor() -> &'static Processor {
    &PROCESSORS[cpu::id()]
}

#[no_mangle]
pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
    ContextImpl::new_kernel(entry, arg)
}

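Vec::resize_default, used above to build the PROCESS table, was a nightly-only API (hence `#![feature(vec_resize_default)]` in lib.rs earlier in this diff); resize_with is the stable equivalent. A user-space sketch of the table construction:

    #[derive(Default)]
    struct Process { parent: usize }

    const MAX_PROCESS_NUM: usize = 48;

    fn main() {
        let mut table: Vec<Process> = Vec::new();
        // Stable equivalent of the nightly resize_default used above.
        table.resize_with(MAX_PROCESS_NUM, Default::default);
        assert_eq!(table.len(), MAX_PROCESS_NUM);
    }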
@ -0,0 +1,4 @@
pub struct cpu {
pub id: usize
}

@ -1,11 +1,10 @@
use alloc::collections::VecDeque;
use super::*;
use thread;

#[derive(Default)]
pub struct Condvar {
    wait_queue: SpinNoIrqLock<VecDeque<thread::Thread>>,
}

impl Condvar {
@ -34,4 +33,7 @@ impl Condvar {
            t.unpark();
        }
    }

    pub fn _clear(&self) {
        self.wait_queue.lock().clear();
    }
}

@ -60,27 +60,47 @@
/// Fork the current process. Return the child's PID.
fn sys_fork(tf: &TrapFrame) -> i32 {
    let mut context = process().fork(tf);
    memory_set_map_swappable(context.get_memory_set_mut());
    let pid = processor().manager().add(context);
    Process::new_fork(pid, thread::current().id());
    //memory_set_map_swappable(processor.get_context_mut(pid).get_memory_set_mut());
    info!("fork: {} -> {}", thread::current().id(), pid);
    pid as i32
}

/// Wait for a process to exit.
/// Return 0 on success. Store the exit code to `code` if it is not null.
fn sys_wait(pid: usize, code: *mut i32) -> i32 {
    loop {
        let wait_procs = match pid {
            0 => Process::get_children(),
            _ => vec![pid],
        };
        if wait_procs.is_empty() {
            return -1;
        }
        for pid in wait_procs {
            match processor().manager().get_status(pid) {
                Some(Status::Exited(exit_code)) => {
                    if !code.is_null() {
                        unsafe { code.write(exit_code as i32); }
                    }
                    processor().manager().remove(pid);
                    Process::do_wait(pid);
                    info!("wait: {} -> {}", thread::current().id(), pid);
                    return 0;
                }
                None => return -1,
                _ => {}
            }
        }
        if pid == 0 {
            Process::wait_child();
        } else {
            processor().manager().wait(thread::current().id(), pid);
            processor().yield_now();
        }
    }
}
@ -91,7 +111,11 @@
/// Kill the process
fn sys_kill(pid: usize) -> i32 {
    processor().manager().exit(pid, 0x100);
    Process::proc_exit(pid);
    if pid == thread::current().id() {
        processor().yield_now();
    }
    0
}
@ -101,11 +125,12 @@
}

/// Exit the current process
fn sys_exit(exit_code: usize) -> i32 {
    let pid = thread::current().id();
    processor().manager().exit(pid, exit_code);
    Process::proc_exit(pid);
    processor().yield_now();
    unreachable!();
}

fn sys_sleep(time: usize) -> i32 {
@ -115,13 +140,12 @@
}

fn sys_get_time() -> i32 {
    unsafe { ::trap::TICK as i32 }
}

fn sys_lab6_set_priority(priority: usize) -> i32 {
    let pid = thread::current().id();
    processor().manager().set_priority(pid, priority as u8);
    0
}

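sys_wait above stores the exit code through a raw user pointer, writing only after a null check; ptr::write avoids dropping whatever the destination previously held. A minimal user-space sketch of the pattern (the helper name is illustrative):

    fn store_exit_code(code: *mut i32, exit_code: i32) {
        if !code.is_null() {
            // Same pattern as sys_wait: check for null, then raw write.
            unsafe { code.write(exit_code); }
        }
    }

    fn main() {
        let mut slot = 0;
        store_exit_code(&mut slot, 0x100);
        assert_eq!(slot, 0x100);
        store_exit_code(std::ptr::null_mut(), 7); // null: silently ignored
    }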
@ -1,40 +1,23 @@
use process::*;
use arch::interrupt::TrapFrame;
use arch::cpu;

pub static mut TICK: usize = 0;

pub fn timer() {
    processor().tick();
    if cpu::id() == 0 {
        unsafe { TICK += 1; }
    }
    //info!("finish before return!");
}

pub fn error(tf: &TrapFrame) -> ! {
    error!("{:#x?}", tf);
    let pid = processor().pid();
    error!("On CPU{} Process {}", cpu::id(), pid);

    processor().manager().exit(pid, 0x100);
    processor().yield_now();
    unreachable!();
}