parent 66b18772c6
commit 2002ddd5fa
@@ -1,18 +1,25 @@
 #![no_std]
 #![feature(alloc)]
 #![feature(const_fn)]
+#![feature(linkage)]
+#![feature(nll)]
+#![feature(vec_resize_default)]
 
 extern crate alloc;
 #[macro_use]
 extern crate log;
+extern crate spin;
 
 // To use `println!` in test
 #[cfg(test)]
 #[macro_use]
 extern crate std;
 
-pub mod processor;
+mod process_manager;
+mod processor;
 pub mod scheduler;
 pub mod thread;
-mod util;
 mod event_hub;
+
+pub use process_manager::*;
+pub use processor::Processor;
@@ -0,0 +1,196 @@
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use spin::Mutex;
+use scheduler::Scheduler;
+use core::cell::UnsafeCell;
+use alloc::vec::Vec;
+use event_hub::EventHub;
+
+struct Process {
+    id: Pid,
+    status: Status,
+    status_after_stop: Status,
+    context: Option<Box<Context>>,
+}
+
+pub type Pid = usize;
+type ExitCode = usize;
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub enum Status {
+    Ready,
+    Running(usize),
+    Sleeping,
+    Waiting(Pid),
+    /// aka ZOMBIE. Its context was dropped.
+    Exited(ExitCode),
+}
+
+enum Event {
+    Wakeup(Pid),
+}
+
+pub trait Context {
+    unsafe fn switch_to(&mut self, target: &mut Context);
+}
+
+pub struct ProcessManager {
+    procs: Vec<Mutex<Option<Process>>>,
+    scheduler: Mutex<Box<Scheduler>>,
+    wait_queue: Vec<Mutex<Vec<Pid>>>,
+    event_hub: Mutex<EventHub<Event>>,
+}
+
+impl ProcessManager {
+    pub fn new(scheduler: Box<Scheduler>, max_proc_num: usize) -> Self {
+        ProcessManager {
+            procs: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            scheduler: Mutex::new(scheduler),
+            wait_queue: {
+                let mut vec = Vec::new();
+                vec.resize_default(max_proc_num);
+                vec
+            },
+            event_hub: Mutex::new(EventHub::new()),
+        }
+    }
+
+    fn alloc_pid(&self) -> Pid {
+        for (i, proc) in self.procs.iter().enumerate() {
+            if proc.lock().is_none() {
+                return i;
+            }
+        }
+        panic!("Process number exceeded");
+    }
+
+    /// Add a new process
+    pub fn add(&self, context: Box<Context>) -> Pid {
+        let pid = self.alloc_pid();
+        *(&self.procs[pid]).lock() = Some(Process {
+            id: pid,
+            status: Status::Ready,
+            status_after_stop: Status::Ready,
+            context: Some(context),
+        });
+        self.scheduler.lock().insert(pid);
+        pid
+    }
+
+    /// Make process `pid` time slice -= 1.
+    /// Return true if time slice == 0.
+    /// Called by timer interrupt handler.
+    pub fn tick(&self, pid: Pid) -> bool {
+        let mut event_hub = self.event_hub.lock();
+        event_hub.tick();
+        while let Some(event) = event_hub.pop() {
+            match event {
+                Event::Wakeup(pid) => self.set_status(pid, Status::Ready),
+            }
+        }
+        self.scheduler.lock().tick(pid)
+    }
+
+    /// Set the priority of process `pid`
+    pub fn set_priority(&self, pid: Pid, priority: u8) {
+        self.scheduler.lock().set_priority(pid, priority);
+    }
+
+    /// Called by Processor to get a process to run.
+    /// The manager first mark it `Running`,
+    /// then take out and return its Context.
+    pub fn run(&self, cpu_id: usize) -> (Pid, Box<Context>) {
+        let mut scheduler = self.scheduler.lock();
+        let pid = scheduler.select()
+            .expect("failed to select a runnable process");
+        scheduler.remove(pid);
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = Status::Running(cpu_id);
+        (pid, proc.context.take().unwrap())
+    }
+
+    /// Called by Processor to finish running a process
+    /// and give its context back.
+    pub fn stop(&self, pid: Pid, context: Box<Context>) {
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        proc.status = proc.status_after_stop.clone();
+        proc.status_after_stop = Status::Ready;
+        proc.context = Some(context);
+        match proc.status {
+            Status::Ready => self.scheduler.lock().insert(pid),
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+    /// Switch the status of a process.
+    /// Insert/Remove it to/from scheduler if necessary.
+    fn set_status(&self, pid: Pid, status: Status) {
+        let mut scheduler = self.scheduler.lock();
+        let mut proc_lock = self.procs[pid].lock();
+        let mut proc = proc_lock.as_mut().unwrap();
+        trace!("process {} {:?} -> {:?}", pid, proc.status, status);
+        match (&proc.status, &status) {
+            (Status::Ready, Status::Ready) => return,
+            (Status::Ready, _) => scheduler.remove(pid),
+            (Status::Running(_), _) => {},
+            (Status::Exited(_), _) => panic!("can not set status for a exited process"),
+            (Status::Waiting(target), Status::Exited(_)) =>
+                self.wait_queue[*target].lock().retain(|&i| i != pid),
+            // TODO: Sleep -> Exited  Remove wakeup event.
+            (_, Status::Ready) => scheduler.insert(pid),
+            _ => {}
+        }
+        match proc.status {
+            Status::Running(_) => proc.status_after_stop = status,
+            _ => proc.status = status,
+        }
+        match proc.status {
+            Status::Exited(_) => proc.context = None,
+            _ => {}
+        }
+    }
+
+
+    pub fn get_status(&self, pid: Pid) -> Option<Status> {
+        self.procs[pid].lock().as_ref().map(|p| p.status.clone())
+    }
+
+    pub fn remove(&self, pid: Pid) {
+        let mut proc_lock = self.procs[pid].lock();
+        let proc = proc_lock.as_ref().unwrap();
+        match proc.status {
+            Status::Exited(_) => *proc_lock = None,
+            _ => panic!("can not remove non-exited process"),
+        }
+    }
+
+    pub fn sleep(&self, pid: Pid, time: usize) {
+        self.set_status(pid, Status::Sleeping);
+        if time != 0 {
+            self.event_hub.lock().push(time, Event::Wakeup(pid));
+        }
+    }
+
+    pub fn wakeup(&self, pid: Pid) {
+        self.set_status(pid, Status::Ready);
+    }
+
+    pub fn wait(&self, pid: Pid, target: Pid) {
+        self.set_status(pid, Status::Waiting(target));
+        self.wait_queue[target].lock().push(pid);
+    }
+
+    pub fn exit(&self, pid: Pid, code: ExitCode) {
+        self.set_status(pid, Status::Exited(code));
+        for waiter in self.wait_queue[pid].lock().drain(..) {
+            self.wakeup(waiter);
+        }
+    }
+}
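The subtle part of the new process-manager file is the status bookkeeping: while a process is `Running`, `set_status()` only records the requested state in `status_after_stop`, and `stop()` applies it once the CPU hands the context back. The following is a minimal, self-contained sketch of just that rule, written with hypothetical names; it is not code from this commit.

// Sketch only: models the Running -> status_after_stop -> stop() hand-off above.
#[derive(Clone, Debug, PartialEq)]
enum Status { Ready, Running(usize), Sleeping, Exited(usize) }

struct Slot { status: Status, status_after_stop: Status }

fn set_status(p: &mut Slot, new: Status) {
    match p.status {
        Status::Running(_) => p.status_after_stop = new, // defer while the process is on a CPU
        _ => p.status = new,
    }
}

fn stop(p: &mut Slot) {
    p.status = p.status_after_stop.clone();
    p.status_after_stop = Status::Ready;
}

fn main() {
    let mut p = Slot { status: Status::Running(0), status_after_stop: Status::Ready };
    set_status(&mut p, Status::Sleeping);     // e.g. the process asked to sleep
    assert_eq!(p.status, Status::Running(0)); // still on CPU 0 until it is stopped
    stop(&mut p);                             // the CPU gives the context back
    assert_eq!(p.status, Status::Sleeping);   // the deferred status now takes effect
}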
@@ -1 +1 @@
-Subproject commit a37a65fa13a00c5aa0068c3f2b5d55af6a37dd93
+Subproject commit f358204af01f2374ab6ed6ea059f724cd5f2fe6f
@@ -1,14 +1,19 @@
     .section .text.entry
     .globl _start
 _start:
-    lui sp, %hi(bootstacktop)
-    addi sp, sp, %lo(bootstacktop)
+    add t0, a0, 1
+    slli t0, t0, 16
+
+    lui sp, %hi(bootstack)
+    addi sp, sp, %lo(bootstack)
+    add sp, sp, t0
+
     call rust_main
 
     .section .bss
     .align 12  #PGSHIFT
     .global bootstack
 bootstack:
-    .space 4096 * 16  #KSTACKSIZE
+    .space 4096 * 16 * 8
     .global bootstacktop
 bootstacktop:
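The rewritten boot entry gives each hart its own slice of the boot stack instead of one shared top: assuming a0 carries the hart id (the usual BBL/SBI boot convention), t0 becomes (hartid + 1) << 16 and sp ends up at bootstack + (hartid + 1) * 64 KiB, which is also why the reserved space grows to 4096 * 16 * 8. A small Rust sketch of the same arithmetic, with a hypothetical bootstack address:

// Sketch of the stack-top computation done by the assembly above.
const STACK_SIZE_PER_HART: usize = 4096 * 16; // 64 KiB, matches `slli t0, t0, 16`

fn boot_stack_top(bootstack: usize, hart_id: usize) -> usize {
    // Each hart's stack grows down from the top of its own 64 KiB slice.
    bootstack + (hart_id + 1) * STACK_SIZE_PER_HART
}

fn main() {
    let bootstack = 0x8040_0000; // hypothetical link-time address of `bootstack`
    assert_eq!(boot_stack_top(bootstack, 0) - bootstack, 0x1_0000);
    assert_eq!(boot_stack_top(bootstack, 7) - bootstack, 0x8_0000); // top of the whole 8-hart region
}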
@@ -0,0 +1,14 @@
+// Physical address available on THINPAD:
+// [0x80000000, 0x80800000]
+const P2_SIZE: usize = 1 << 22;
+const P2_MASK: usize = 0x3ff << 22;
+pub const RECURSIVE_INDEX: usize = 0x3fe;
+pub const KERNEL_OFFSET: usize = 0;
+pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;
+pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
+pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
+pub const MEMORY_OFFSET: usize = 0x8000_0000;
+pub const MEMORY_END: usize = 0x8080_0000;
+pub const USER_STACK_OFFSET: usize = 0x70000000;
+pub const USER_STACK_SIZE: usize = 0x10000;
+pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
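For reference, the derived values in this rv32 layout: one second-level (P2) entry maps 1 << 22 bytes = 4 MiB, KERNEL_P2_INDEX = 0x8000_0000 >> 22 = 512, and RECURSIVE_INDEX = 0x3fe = 1022, the next-to-last of the 1024 P2 slots. A tiny checked sketch of that arithmetic (host-side, not kernel code):

// Quick sanity checks on the constants above (Sv32 paging: 1024 P2 entries of 4 MiB each).
fn main() {
    let p2_size: usize = 1 << 22;
    assert_eq!(p2_size, 4 * 1024 * 1024);                       // 4 MiB per P2 entry
    assert_eq!(0x8000_0000usize >> 22, 512);                    // KERNEL_P2_INDEX
    assert_eq!(0x3fe, 1022);                                    // RECURSIVE_INDEX: second-to-last slot
    assert_eq!((0x8080_0000usize - 0x8000_0000) / p2_size, 2);  // 8 MiB of RAM spans 2 entries
}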
@@ -0,0 +1,36 @@
+use consts::MAX_CPU_NUM;
+use core::ptr::{read_volatile, write_volatile};
+use memory::*;
+
+static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
+
+pub unsafe fn set_cpu_id(cpu_id: usize) {
+    asm!("mv tp, $0" : : "r"(cpu_id));
+}
+
+pub fn id() -> usize {
+    let cpu_id;
+    unsafe { asm!("mv $0, tp" : "=r"(cpu_id)); }
+    cpu_id
+}
+
+pub fn send_ipi(cpu_id: usize) {
+    super::bbl::sbi::send_ipi(1 << cpu_id);
+}
+
+pub unsafe fn has_started(cpu_id: usize) -> bool {
+    read_volatile(&STARTED[cpu_id])
+}
+
+pub unsafe fn start_others(hart_mask: usize) {
+    for cpu_id in 0..MAX_CPU_NUM {
+        if (hart_mask >> cpu_id) & 1 != 0 {
+            write_volatile(&mut STARTED[cpu_id], true);
+        }
+    }
+}
+
+pub fn halt() {
+    use super::riscv::asm::wfi;
+    unsafe { wfi() }
+}
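In this module, send_ipi() builds a one-hot hart mask for the SBI wrapper, and start_others() walks a mask of the same shape bit by bit; both reduce to the shift-and-test shown in this standalone sketch (not kernel code):

// Hart masks are plain bit sets: bit i set means "hart i is a target".
fn main() {
    let hart_mask: usize = (1 << 2) | (1 << 3); // the kind of mask send_ipi would build for harts 2 and 3
    let targets: Vec<usize> = (0..8).filter(|&i| (hart_mask >> i) & 1 != 0).collect();
    assert_eq!(targets, vec![2, 3]);
}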
@@ -0,0 +1,97 @@
+// Copy from Redox consts.rs:
+
+// Because the memory map is so important to not be aliased, it is defined here, in one place
+// The lower 256 PML4 entries are reserved for userspace
+// Each PML4 entry references up to 512 GB of memory
+// The top (511) PML4 is reserved for recursive mapping
+// The second from the top (510) PML4 is reserved for the kernel
+/// The size of a single PML4
+pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
+pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
+
+/// Offset of recursive paging
+pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
+pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset of kernel
+pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
+pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE;
+
+pub const KERNEL_SIZE: usize = PML4_SIZE;
+
+/// Offset to kernel heap
+pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
+pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of kernel heap
+pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
+
+pub const MEMORY_OFFSET: usize = 0;
+
+/// Offset to kernel percpu variables
+//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
+pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
+/// Size of kernel percpu variables
+pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
+
+/// Offset to user image
+pub const USER_OFFSET: usize = 0;
+pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user TCB
+pub const USER_TCB_OFFSET: usize = 0xB000_0000;
+
+/// Offset to user arguments
+pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2;
+
+/// Offset to user heap
+pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
+pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user grants
+pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
+pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user stack
+pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
+pub const USER32_STACK_OFFSET: usize = 0xB000_0000;
+pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of user stack
+pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
+
+/// Offset to user sigstack
+pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
+pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
+/// Size of user sigstack
+pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
+
+/// Offset to user TLS
+pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
+pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary image (used when cloning)
+pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
+pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary heap (used when cloning)
+pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
+pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary page for grants
+pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
+pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary stack (used when cloning)
+pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
+pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary sigstack (used when cloning)
+pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
+pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset to user temporary tls (used when cloning)
+pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
+pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
+
+/// Offset for usage in other temporary pages
+pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
+pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE;
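These are the Redox-style PML4 carve-ups: each PML4 slot covers 512 GiB, the recursive mapping sits in slot 511 and the kernel in slot 510, exactly as the comments claim. A short check of that arithmetic (assumes a 64-bit host when run as a plain program):

// Verify the slot numbers implied by the constants above.
fn main() {
    const PML4_SIZE: usize = 0x0000_0080_0000_0000;             // 512 GiB
    const PML4_MASK: usize = 0x0000_ff80_0000_0000;
    const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
    const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
    assert_eq!((RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE, 511); // top slot
    assert_eq!((KERNEL_OFFSET & PML4_MASK) / PML4_SIZE, 510);         // second from the top
}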
@@ -1,133 +1,6 @@
 #![allow(dead_code)]
 
-#[cfg(target_arch = "riscv32")]
-pub use self::riscv::*;
-#[cfg(target_arch = "x86_64")]
-pub use self::x86_64::*;
+pub use arch::consts::*;
 
 pub const MAX_CPU_NUM: usize = 8;
 pub const MAX_PROCESS_NUM: usize = 48;
-
-// Memory address for riscv32
-#[cfg(target_arch = "riscv32")]
-mod riscv {
-    // Physical address available on THINPAD:
-    // [0x80000000, 0x80800000]
-    const P2_SIZE: usize = 1 << 22;
-    const P2_MASK: usize = 0x3ff << 22;
-    // RECURSIVE_PAGE_PML4 indicate the index of the self-maping entry in root pagetable
-    pub const RECURSIVE_PAGE_PML4: usize = 0x3fe;
-    // KERNEL_OFFSET indicate (virtual kernel address - physical kernel address) ???
-    pub const KERNEL_OFFSET: usize = 0;
-    // KERNEL_PML4 indicate the index of the kernel entry in root pagetable
-    pub const KERNEL_PML4: usize = 0x8000_0000 >> 22;
-    pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
-    pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
-    pub const MEMORY_OFFSET: usize = 0x8000_0000;
-    pub const MEMORY_END: usize = 0x8080_0000;
-    pub const USER_STACK_OFFSET: usize = 0x70000000;
-    pub const USER_STACK_SIZE: usize = 0x10000;
-    pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
-}
-
-// Memory address for x86_64
-#[cfg(target_arch = "x86_64")]
-mod x86_64 {
-    // Copy from Redox consts.rs:
-
-    // Because the memory map is so important to not be aliased, it is defined here, in one place
-    // The lower 256 PML4 entries are reserved for userspace
-    // Each PML4 entry references up to 512 GB of memory
-    // The top (511) PML4 is reserved for recursive mapping
-    // The second from the top (510) PML4 is reserved for the kernel
-    /// The size of a single PML4
-    pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
-    pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
-
-    /// Offset of recursive paging
-    pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
-    pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset of kernel
-    pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
-    pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    pub const KERNEL_SIZE: usize = PML4_SIZE;
-
-    /// Offset to kernel heap
-    pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
-    pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
-    /// Size of kernel heap
-    pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
-
-    pub const MEMORY_OFFSET: usize = 0;
-
-    /// Offset to kernel percpu variables
-    //TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
-    pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
-    /// Size of kernel percpu variables
-    pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
-
-    /// Offset to user image
-    pub const USER_OFFSET: usize = 0;
-    pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user TCB
-    pub const USER_TCB_OFFSET: usize = 0xB000_0000;
-
-    /// Offset to user arguments
-    pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2;
-
-    /// Offset to user heap
-    pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
-    pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user grants
-    pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
-    pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user stack
-    pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
-    pub const USER32_STACK_OFFSET: usize = 0xB000_0000;
-    pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
-    /// Size of user stack
-    pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
-
-    /// Offset to user sigstack
-    pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
-    pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
-    /// Size of user sigstack
-    pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
-
-    /// Offset to user TLS
-    pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
-    pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary image (used when cloning)
-    pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
-    pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary heap (used when cloning)
-    pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
-    pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary page for grants
-    pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
-    pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary stack (used when cloning)
-    pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
-    pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary sigstack (used when cloning)
-    pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
-    pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset to user temporary tls (used when cloning)
-    pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
-    pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
-
-    /// Offset for usage in other temporary pages
-    pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
-    pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE;
-}
@@ -1,57 +1,94 @@
-use spin::Once;
-use sync::{SpinNoIrqLock, Mutex, MutexGuard, SpinNoIrq};
-pub use self::context::Context;
-pub use ucore_process::processor::{*, Context as _whatever};
-pub use ucore_process::scheduler::*;
-pub use ucore_process::thread::*;
+use spin::Mutex;
+pub use self::context::ContextImpl;
+pub use ucore_process::*;
+use consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
+use arch::cpu;
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
+use sync::Condvar;
+use core::sync::atomic::*;
 
 mod context;
 
-type Processor = Processor_<Context, StrideScheduler>;
-
-/*
-* @brief:
-*   initialize a new kernel process (idleproc)
-*/
 pub fn init() {
-    PROCESSOR.call_once(||
-        SpinNoIrqLock::new({
-            let mut processor = Processor::new(
-                unsafe { Context::new_init() },
     // NOTE: max_time_slice <= 5 to ensure 'priority' test pass
-                StrideScheduler::new(5),
-            );
-            extern fn idle(arg: usize) -> ! {
-                loop {}
-            }
-            processor.add(Context::new_kernel(idle, 0));
-            processor
-        })
-    );
+    let scheduler = Box::new(scheduler::RRScheduler::new(5));
+    let manager = Arc::new(ProcessManager::new(scheduler, MAX_PROCESS_NUM));
+    extern fn idle(_arg: usize) -> ! {
+        loop { cpu::halt(); }
+    }
+    for i in 0..4 {
+        manager.add(ContextImpl::new_kernel(idle, i));
+    }
+
+    unsafe {
+        for cpu_id in 0..MAX_CPU_NUM {
+            PROCESSORS[cpu_id].init(cpu_id, ContextImpl::new_init(), manager.clone());
+        }
+    }
     info!("process init end");
 }
 
-pub static PROCESSOR: Once<SpinNoIrqLock<Processor>> = Once::new();
+static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()];
 
-pub fn processor() -> MutexGuard<'static, Processor, SpinNoIrq> {
-    PROCESSOR.try().unwrap().lock()
+/// Ugly solution for sys_wait(0) (wait for any child)
+#[derive(Default)]
+pub struct Process {
+    parent: AtomicUsize,
+    children: Mutex<Vec<usize>>,
+    subproc_exit: Condvar, // Trigger parent's when exit
 }
 
-#[allow(non_camel_case_types)]
-pub type thread = ThreadMod<ThreadSupportImpl>;
+impl Process {
+    pub fn new_fork(pid: usize, parent: usize) {
+        PROCESS[pid].parent.store(parent, Ordering::Relaxed);
+        PROCESS[pid].subproc_exit._clear();
+        PROCESS[parent].children.lock().push(pid);
+    }
+    pub fn proc_exit(pid: usize) {
+        let parent = PROCESS[pid].parent.load(Ordering::Relaxed);
+        PROCESS[parent].subproc_exit.notify_all();
+    }
+    pub fn wait_child() {
+        Self::current().subproc_exit._wait();
+    }
+    pub fn get_children() -> Vec<usize> {
+        Self::current().children.lock().clone()
+    }
+    pub fn do_wait(pid: usize) {
+        Self::current().children.lock().retain(|&p| p != pid);
+    }
+    fn current() -> &'static Self {
+        &PROCESS[thread::current().id()]
+    }
+}
+
+lazy_static! {
+    pub static ref PROCESS: Vec<Process> = {
+        let mut vec = Vec::new();
+        vec.resize_default(MAX_PROCESS_NUM);
+        vec
+    };
+}
 
-pub mod thread_ {
-    pub type Thread = super::Thread<super::ThreadSupportImpl>;
+/// Get current thread struct
+pub fn process() -> &'static mut ContextImpl {
+    use core::mem::transmute;
+    let (process, _): (&mut ContextImpl, *const ()) = unsafe {
+        transmute(processor().context())
+    };
+    process
 }
 
-pub struct ThreadSupportImpl;
-
-impl ThreadSupport for ThreadSupportImpl {
-    type Context = Context;
-    type Scheduler = StrideScheduler;
-    type ProcessorGuard = MutexGuard<'static, Processor, SpinNoIrq>;
-
-    fn processor() -> Self::ProcessorGuard {
-        processor()
+// Implement dependencies for std::thread
+#[no_mangle]
+pub fn processor() -> &'static Processor {
+    &PROCESSORS[cpu::id()]
 }
 
+#[no_mangle]
+pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
+    ContextImpl::new_kernel(entry, arg)
 }
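The reworked process module drops the single global PROCESSOR behind a lock and instead keeps one Processor per CPU plus one shared, Arc-wrapped ProcessManager; processor() then just indexes PROCESSORS by cpu::id(). A self-contained sketch of that sharing pattern, using std types and hypothetical stand-in names rather than the kernel's own code:

use std::sync::Arc;

// Stand-ins for ProcessManager and the per-CPU Processor slots.
struct Manager;
struct Processor { cpu_id: usize, manager: Option<Arc<Manager>> }

fn main() {
    let manager = Arc::new(Manager);
    // One Processor per CPU, all pointing at the same manager (cf. PROCESSORS[cpu_id].init(..)).
    let processors: Vec<Processor> = (0..8)
        .map(|cpu_id| Processor { cpu_id, manager: Some(manager.clone()) })
        .collect();
    assert_eq!(processors.len(), 8);
    assert_eq!(Arc::strong_count(&manager), 9); // 8 per-CPU handles + the original
}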
@@ -0,0 +1,4 @@
+pub struct cpu {
+    pub id: usize
+}
+
@@ -1,36 +1,22 @@
 use process::*;
 use arch::interrupt::TrapFrame;
+use arch::cpu;
 
-/*
-* @brief:
-*   process timer interrupt
-*/
-pub fn timer() {
-    let mut processor = processor();
-    processor.tick();
-}
+pub static mut TICK: usize = 0;
 
-pub fn before_return() {
-    if let Some(processor) = PROCESSOR.try() {
-        processor.lock().schedule();
+pub fn timer() {
+    processor().tick();
+    if cpu::id() == 0 {
+        unsafe { TICK += 1; }
     }
 }
 
-/*
-* @param:
-*   TrapFrame: the error's trapframe
-* @brief:
-*   process the error trap, if processor inited then exit else panic!
-*/
 pub fn error(tf: &TrapFrame) -> ! {
-    if let Some(processor) = PROCESSOR.try() {
-        let mut processor = processor.lock();
-        let pid = processor.current_pid();
-        error!("Process {} error:\n{:#x?}", pid, tf);
-        processor.exit(pid, 0x100); // TODO: Exit code for error
-        processor.schedule();
-        unreachable!();
-    } else {
-        panic!("Exception when processor not inited\n{:#x?}", tf);
-    }
+    error!("{:#x?}", tf);
+    let pid = processor().pid();
+    error!("On CPU{} Process {}", cpu::id(), pid);
+    processor().manager().exit(pid, 0x100);
+    processor().yield_now();
+    unreachable!();
 }