Compare commits


36 Commits

Author  SHA1  Message  Date
Yu Chen  ad28f5f627  update signal impl and apps  (3 years ago)
liusm18  a6b278fe10  Fix coding style & add one more test  (3 years ago)
liusm18  ae1a50673d  Remove sigret.S  (3 years ago)
liusm18  7a97faee6c  Add signal user lib  (3 years ago)
liusm18  1d7b8141f0  Add sigret and finish signal handling  (3 years ago)
liusm18  fae8641f36  Add sigaction struct and sys_call  (3 years ago)
liusm18  598d8d4538  Add sigprocmask  (3 years ago)
liusm18  c8542d6107  Modify signal.rs and sys_kill to support more signals  (3 years ago)
csuastt  639643bc5f  Merge pull request #1 from rcore-os/ch7  (3 years ago)
Yifan Wu  3edce0932f  Bump Rust to nightly-2022-04-11 && support debugging in release mode  (3 years ago)
Yu Chen  50db31e463  update README  (3 years ago)
Yu Chen  10fe9b8ffe  add CI for build-doc  (3 years ago)
Yifan Wu  7408b1a7b2  Add boards/ && cargo clippy  (3 years ago)
Yifan Wu  91710ba1a5  Cargo fmt  (3 years ago)
Yifan Wu  9df03206f9  Add infloop&until_timeout  (3 years ago)
Yifan Wu  d1233cbb69  Support SIGABRT when the application panics.  (3 years ago)
Yifan Wu  1f55fbe4a2  Add some tests from ch2 && support SIGSEGV&SIGILL  (3 years ago)
Yifan Wu  4a169450f8  Fix cat  (3 years ago)
Yifan Wu  0d189e9ad7  Cargo fmt  (3 years ago)
Yifan Wu  2f5cff7e21  cargo clippy  (3 years ago)
Yifan Wu  ad85266da1  Remove unmeaningful messages of initproc/user_shell  (3 years ago)
Yifan Wu  87e61ef7e9  Drop fd_table when a process exits; user_shell supports pipes  (3 years ago)
Yifan Wu  2ec8a4d28b  Bump Rust to nightly-2022-01-19  (3 years ago)
Yifan Wu  675fe88fea  Maximum concurrent processes from 40/35->30.  (3 years ago)
Yifan Wu  53855b9997  Update .gitignore  (3 years ago)
Yifan Wu  d01b99d3f9  Remove os/src/loader.rs && Update testcases cat and huge_write  (4 years ago)
Yifan Wu  4822f6253a  Add easy-fs-fuse/.gitignore  (4 years ago)
Yifan Wu  a97e29fdb1  Now PageTable::unmap calls PageTable::find_pte instead of PageTable::find_pte_create.  (4 years ago)
Yifan Wu  39c9c80d35  Kernel cannot dump now.  (4 years ago)
Yifan Wu  11cdc5f2e6  Bump to rust nightly-2022-01-01, feature global_asm,asm->stable  (4 years ago)
Yifan Wu  b001d3c98e  Bump to Rust nightly 2021-12-15  (4 years ago)
Yifan Wu  c0d41dccf6  Update os/Makefile, rm ... -f -> rm -f ...  (4 years ago)
Yu Chen  913ea57a94  update .gitignore, README.md, dev-env-info.md  (4 years ago)
Yifan Wu  940e88a002  Now construction of PA/VA only uses 56/39 bits.  (4 years ago)
Yifan Wu  bb98f7f88c  rust->nightly-2021-10-15,cargo-binutils->0.3.3  (4 years ago)
Yifan Wu  7eda37a407  user base from 0x0->0x10000; user image size from 128MiB->16MiB  (4 years ago)

@ -0,0 +1,25 @@
name: Build Rust Doc
on: [push]
env:
CARGO_TERM_COLOR: always
jobs:
build-doc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Build doc
run: |
rustup target add riscv64gc-unknown-none-elf
rustup component add llvm-tools-preview
rustup component add rust-src
cd os
cargo doc --no-deps --verbose
- name: Deploy to Github Pages
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./os/target/riscv64gc-unknown-none-elf/doc
destination_dir: ${{ github.ref_name }}

@ -1,66 +0,0 @@
name: Build Rust Doc And Run tests
on: [push]
env:
CARGO_TERM_COLOR: always
jobs:
build-doc:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-04-11
components: rust-src, llvm-tools-preview
target: riscv64gc-unknown-none-elf
- name: Build doc
run: cd os && cargo doc --no-deps --verbose
- name: Deploy to Github Pages
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./os/target/riscv64gc-unknown-none-elf/doc
destination_dir: ${{ github.ref_name }}
run-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-04-11
components: rust-src, llvm-tools-preview
target: riscv64gc-unknown-none-elf
- uses: actions-rs/install@v0.1
with:
crate: cargo-binutils
version: latest
use-tool-cache: true
- name: Cache QEMU
uses: actions/cache@v3
with:
path: qemu-7.0.0
key: qemu-7.0.0-x86_64-riscv64
- name: Install QEMU
run: |
sudo apt-get update
sudo apt-get install ninja-build -y
if [ ! -d qemu-7.0.0 ]; then
wget https://download.qemu.org/qemu-7.0.0.tar.xz
tar -xf qemu-7.0.0.tar.xz
cd qemu-7.0.0
./configure --target-list=riscv64-softmmu
make -j
else
cd qemu-7.0.0
fi
sudo make install
qemu-system-riscv64 --version
- name: Run usertests
run: cd os && make run TEST=1
timeout-minutes: 10

.gitignore

@ -1,13 +1,18 @@
.*/*
!.github/*
!.vscode/settings.json
**/target/
**/Cargo.lock
.idea
Cargo.lock
target
os/src/link_app.S
os/src/linker.ld
os/last-*
os/Cargo.lock
os/.gdb_history
user/build
user/target/*
user/.idea/*
user/Cargo.lock
easy-fs/Cargo.lock
easy-fs/target/*
easy-fs-fuse/Cargo.lock
easy-fs-fuse/target/*
tools/
pushall.sh

@ -1,10 +0,0 @@
{
// Prevent "can't find crate for `test`" error on no_std
// Ref: https://github.com/rust-lang/vscode-rust/issues/729
// For vscode-rust plugin users:
"rust.target": "riscv64gc-unknown-none-elf",
"rust.all_targets": false,
// For Rust Analyzer plugin users:
"rust-analyzer.cargo.target": "riscv64gc-unknown-none-elf",
"rust-analyzer.checkOnSave.allTargets": false
}

@ -6,5 +6,3 @@ docker:
build_docker:
docker build -t ${DOCKER_NAME} .
fmt:
cd easy-fs; cargo fmt; cd ../easy-fs-fuse; cargo fmt; cd ../os; cargo fmt; cd ../user; cargo fmt; cd ..

@ -3,8 +3,6 @@ rCore-Tutorial version 3.5. See the [Documentation in Chinese](https://rcore-os.
rCore-Tutorial API Docs. See the [API Docs of Ten OSes ](#OS-API-DOCS)
If you don't know the Rust language and want to learn it, please visit [Rust Learning Resources](https://github.com/rcore-os/rCore/wiki/study-resource-of-system-programming-in-RUST)
Official QQ group number: 735045051
## news
@ -190,44 +188,6 @@ $ make run BOARD=k210
Type `Ctrl+]` to disconnect from K210.
## Show runtime debug info of OS kernel version
The ch9-log branch contains a lot of debug info. You can try running the rCore Tutorial
from that branch to understand the internal behavior of the OS kernel.
```sh
$ git clone https://github.com/rcore-os/rCore-Tutorial-v3.git
$ cd rCore-Tutorial-v3/os
$ git checkout ch9-log
$ make run
......
[rustsbi] RustSBI version 0.2.0-alpha.10, adapting to RISC-V SBI v0.3
.______ __ __ _______.___________. _______..______ __
| _ \ | | | | / | | / || _ \ | |
| |_) | | | | | | (----`---| |----`| (----`| |_) || |
| / | | | | \ \ | | \ \ | _ < | |
| |\ \----.| `--' |.----) | | | .----) | | |_) || |
| _| `._____| \______/ |_______/ |__| |_______/ |______/ |__|
[rustsbi] Implementation: RustSBI-QEMU Version 0.0.2
[rustsbi-dtb] Hart count: cluster0 with 1 cores
[rustsbi] misa: RV64ACDFIMSU
[rustsbi] mideleg: ssoft, stimer, sext (0x222)
[rustsbi] medeleg: ima, ia, bkpt, la, sa, uecall, ipage, lpage, spage (0xb1ab)
[rustsbi] pmp0: 0x10000000 ..= 0x10001fff (rw-)
[rustsbi] pmp1: 0x2000000 ..= 0x200ffff (rw-)
[rustsbi] pmp2: 0xc000000 ..= 0xc3fffff (rw-)
[rustsbi] pmp3: 0x80000000 ..= 0x8fffffff (rwx)
[rustsbi] enter supervisor 0x80200000
[KERN] rust_main() begin
[KERN] clear_bss() begin
[KERN] clear_bss() end
[KERN] mm::init() begin
[KERN] mm::init_heap() begin
[KERN] mm::init_heap() end
[KERN] mm::init_frame_allocator() begin
[KERN] mm::frame_allocator::lazy_static!FRAME_ALLOCATOR begin
......
```
## Rustdoc
Currently it mainly helps you browse the code, since only a small part of the code has been documented.
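To build these docs locally rather than through CI, the same steps as the build-doc workflow above can be run by hand. A minimal sketch, assuming the repository's pinned nightly toolchain is installed and that `os/` is configured to build for the RISC-V target (as the CI publish path above implies):

```sh
# Mirror the build-doc CI job locally; `--open` additionally launches the result in a browser.
rustup target add riscv64gc-unknown-none-elf
rustup component add llvm-tools-preview rust-src
cd os
cargo doc --no-deps --open
```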

Binary file not shown.

@ -0,0 +1,3 @@
.idea/
target/
Cargo.lock

@ -23,8 +23,6 @@ impl BlockDevice for BlockFile {
.expect("Error when seeking!");
assert_eq!(file.write(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
}
fn handle_irq(&self) { unimplemented!(); }
}
fn main() {

@ -3,5 +3,4 @@ use core::any::Any;
pub trait BlockDevice: Send + Sync + Any {
fn read_block(&self, block_id: usize, buf: &mut [u8]);
fn write_block(&self, block_id: usize, buf: &[u8]);
fn handle_irq(&self);
}

@ -12,7 +12,6 @@ lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
buddy_system_allocator = "0.6"
bitflags = "1.2.1"
xmas-elf = "0.7.0"
volatile = "0.3"
virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" }
k210-pac = { git = "https://github.com/wyfcyx/k210-pac" }
k210-hal = { git = "https://github.com/wyfcyx/k210-hal" }

@ -37,9 +37,6 @@ OBJCOPY := rust-objcopy --binary-architecture=riscv64
# Disassembly
DISASM ?= -x
# Run usertests or usershell
TEST ?=
build: env switch-check $(KERNEL_BIN) fs-img
switch-check:
@ -64,7 +61,7 @@ $(KERNEL_BIN): kernel
@$(OBJCOPY) $(KERNEL_ELF) --strip-all -O binary $@
fs-img: $(APPS)
@cd ../user && make build TEST=$(TEST)
@cd ../user && make build
@rm -f $(FS_IMG)
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
@ -84,7 +81,7 @@ disasm: kernel
disasm-vim: kernel
@$(OBJDUMP) $(DISASM) $(KERNEL_ELF) > $(DISASM_TMP)
@nvim $(DISASM_TMP)
@vim $(DISASM_TMP)
@rm $(DISASM_TMP)
run: run-inner

@ -21,10 +21,3 @@ pub const MMIO: &[(usize, usize)] = &[
pub type BlockDeviceImpl = crate::drivers::block::SDCardWrapper;
pub fn device_init() {
unimplemented!();
}
pub fn irq_handler() {
unimplemented!();
}

@ -1,126 +1,6 @@
pub const CLOCK_FREQ: usize = 12500000;
pub const MMIO: &[(usize, usize)] = &[
(0x1000_0000, 0x1000), // VIRT_UART0 in virt machine
(0x1000_1000, 0x1000), // VIRT_VIRTIO in virt machine
(0x0C00_0000, 0x40_0000), // VIRT_PLIC in virt machine
(0x0010_0000, 0x00_2000), // VIRT_TEST/RTC in virt machine
];
pub const MMIO: &[(usize, usize)] = &[(0x10001000, 0x1000)];
pub type BlockDeviceImpl = crate::drivers::block::VirtIOBlock;
pub type CharDeviceImpl = crate::drivers::chardev::NS16550a<VIRT_UART>;
pub const VIRT_PLIC: usize = 0xC00_0000;
pub const VIRT_UART: usize = 0x1000_0000;
use crate::drivers::block::BLOCK_DEVICE;
use crate::drivers::chardev::{CharDevice, UART};
use crate::drivers::plic::{IntrTargetPriority, PLIC};
pub fn device_init() {
use riscv::register::sie;
let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
let hart_id: usize = 0;
let supervisor = IntrTargetPriority::Supervisor;
let machine = IntrTargetPriority::Machine;
plic.set_threshold(hart_id, supervisor, 0);
plic.set_threshold(hart_id, machine, 1);
for intr_src_id in [1usize, 10] {
plic.enable(hart_id, supervisor, intr_src_id);
plic.set_priority(intr_src_id, 1);
}
unsafe {
sie::set_sext();
}
}
pub fn irq_handler() {
let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
let intr_src_id = plic.claim(0, IntrTargetPriority::Supervisor);
match intr_src_id {
1 => BLOCK_DEVICE.handle_irq(),
10 => UART.handle_irq(),
_ => panic!("unsupported IRQ {}", intr_src_id),
}
plic.complete(0, IntrTargetPriority::Supervisor, intr_src_id);
}
//ref:: https://github.com/andre-richter/qemu-exit
use core::arch::asm;
const EXIT_SUCCESS: u32 = 0x5555; // Equals `exit(0)`. qemu successful exit
const EXIT_FAILURE_FLAG: u32 = 0x3333;
const EXIT_FAILURE: u32 = exit_code_encode(1); // Equals `exit(1)`. qemu failed exit
const EXIT_RESET: u32 = 0x7777; // qemu reset
pub trait QEMUExit {
/// Exit with specified return code.
///
/// Note: For `X86`, code is binary-OR'ed with `0x1` inside QEMU.
fn exit(&self, code: u32) -> !;
/// Exit QEMU using `EXIT_SUCCESS`, aka `0`, if possible.
///
/// Note: Not possible for `X86`.
fn exit_success(&self) -> !;
/// Exit QEMU using `EXIT_FAILURE`, aka `1`.
fn exit_failure(&self) -> !;
}
/// RISCV64 configuration
pub struct RISCV64 {
/// Address of the sifive_test mapped device.
addr: u64,
}
/// Encode the exit code using EXIT_FAILURE_FLAG.
const fn exit_code_encode(code: u32) -> u32 {
(code << 16) | EXIT_FAILURE_FLAG
}
impl RISCV64 {
/// Create an instance.
pub const fn new(addr: u64) -> Self {
RISCV64 { addr }
}
}
impl QEMUExit for RISCV64 {
/// Exit qemu with specified exit code.
fn exit(&self, code: u32) -> ! {
// If code is not a special value, we need to encode it with EXIT_FAILURE_FLAG.
let code_new = match code {
EXIT_SUCCESS | EXIT_FAILURE | EXIT_RESET => code,
_ => exit_code_encode(code),
};
unsafe {
asm!(
"sw {0}, 0({1})",
in(reg)code_new, in(reg)self.addr
);
// For the case that the QEMU exit attempt did not work, transition into an infinite
// loop. Calling `panic!()` here is unfeasible, since there is a good chance
// this function here is the last expression in the `panic!()` handler
// itself. This prevents a possible infinite loop.
loop {
asm!("wfi", options(nomem, nostack));
}
}
}
fn exit_success(&self) -> ! {
self.exit(EXIT_SUCCESS);
}
fn exit_failure(&self) -> ! {
self.exit(EXIT_FAILURE);
}
}
const VIRT_TEST: u64 = 0x100000;
pub const QEMU_EXIT_HANDLE: RISCV64 = RISCV64::new(VIRT_TEST);

@ -8,6 +8,7 @@ pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
pub const TRAP_CONTEXT: usize = TRAMPOLINE - PAGE_SIZE;
pub use crate::board::{CLOCK_FREQ, MMIO};

@ -1,4 +1,4 @@
use crate::drivers::chardev::{CharDevice, UART};
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
struct Stdout;
@ -6,7 +6,7 @@ struct Stdout;
impl Write for Stdout {
fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.chars() {
UART.write(c as u8);
console_putchar(c as usize);
}
Ok(())
}
@ -19,13 +19,13 @@ pub fn print(args: fmt::Arguments) {
#[macro_export]
macro_rules! print {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!($fmt $(, $($arg)+)?))
$crate::console::print(format_args!($fmt $(, $($arg)+)?));
}
}
#[macro_export]
macro_rules! println {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));
}
}

@ -1,13 +1,13 @@
mod sdcard;
mod virtio_blk;
pub use sdcard::SDCardWrapper;
pub use virtio_blk::VirtIOBlock;
pub use sdcard::SDCardWrapper;
use crate::board::BlockDeviceImpl;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
use lazy_static::*;
use crate::board::BlockDeviceImpl;
lazy_static! {
pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = Arc::new(BlockDeviceImpl::new());

@ -3,7 +3,7 @@
#![allow(unused)]
use super::BlockDevice;
use crate::sync::UPIntrFreeCell;
use crate::sync::UPSafeCell;
use core::convert::TryInto;
use k210_hal::prelude::*;
use k210_pac::{Peripherals, SPI0};
@ -321,14 +321,14 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
* Get SD card data response.
* @param None
* @retval The SD status: Read data response xxx0<status>1
* - status 010: Data accepted
* - status 010: Data accecpted
* - status 101: Data rejected due to a crc error
* - status 110: Data rejected due to a Write error.
* - status 111: Data rejected due to other error.
*/
fn get_dataresponse(&self) -> u8 {
let response = &mut [0u8];
/* Read response */
/* Read resonse */
self.read_data(response);
/* Mask unused bits */
response[0] &= 0x1F;
@ -423,7 +423,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Byte 15 */
CSD_CRC: (csd_tab[15] & 0xFE) >> 1,
Reserved4: 1,
/* Return the response */
/* Return the reponse */
})
}
@ -715,8 +715,8 @@ fn io_init() {
}
lazy_static! {
static ref PERIPHERALS: UPIntrFreeCell<Peripherals> =
unsafe { UPIntrFreeCell::new(Peripherals::take().unwrap()) };
static ref PERIPHERALS: UPSafeCell<Peripherals> =
unsafe { UPSafeCell::new(Peripherals::take().unwrap()) };
}
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@ -740,11 +740,11 @@ fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
sd
}
pub struct SDCardWrapper(UPIntrFreeCell<SDCard<SPIImpl<SPI0>>>);
pub struct SDCardWrapper(UPSafeCell<SDCard<SPIImpl<SPI0>>>);
impl SDCardWrapper {
pub fn new() -> Self {
unsafe { Self(UPIntrFreeCell::new(init_sdcard())) }
unsafe { Self(UPSafeCell::new(init_sdcard())) }
}
}
@ -761,7 +761,4 @@ impl BlockDevice for SDCardWrapper {
.write_sector(buf, block_id as u32)
.unwrap();
}
fn handle_irq(&self) {
unimplemented!();
}
}

@ -3,93 +3,42 @@ use crate::mm::{
frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
StepByOne, VirtAddr,
};
use crate::sync::{Condvar, UPIntrFreeCell};
use crate::task::schedule;
use crate::DEV_NON_BLOCKING_ACCESS;
use alloc::collections::BTreeMap;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{BlkResp, RespStatus, VirtIOBlk, VirtIOHeader};
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock {
virtio_blk: UPIntrFreeCell<VirtIOBlk<'static>>,
condvars: BTreeMap<u16, Condvar>,
}
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);
lazy_static! {
static ref QUEUE_FRAMES: UPIntrFreeCell<Vec<FrameTracker>> =
unsafe { UPIntrFreeCell::new(Vec::new()) };
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe { UPSafeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb {
let mut resp = BlkResp::default();
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched()
});
schedule(task_cx_ptr);
assert_eq!(
resp.status(),
RespStatus::Ok,
"Error when reading VirtIOBlk"
);
} else {
self.virtio_blk
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
self.0
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb {
let mut resp = BlkResp::default();
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched()
});
schedule(task_cx_ptr);
assert_eq!(
resp.status(),
RespStatus::Ok,
"Error when writing VirtIOBlk"
);
} else {
self.virtio_blk
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
fn handle_irq(&self) {
self.virtio_blk.exclusive_session(|blk| {
while let Ok(token) = blk.pop_used() {
self.condvars.get(&token).unwrap().signal();
}
});
self.0
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
impl VirtIOBlock {
#[allow(unused)]
pub fn new() -> Self {
let virtio_blk = unsafe {
UPIntrFreeCell::new(VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap())
};
let mut condvars = BTreeMap::new();
let channels = virtio_blk.exclusive_access().virt_queue_size();
for i in 0..channels {
let condvar = Condvar::new();
condvars.insert(i, condvar);
}
Self {
virtio_blk,
condvars,
unsafe {
Self(UPSafeCell::new(
VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
))
}
}
}

@ -1,17 +0,0 @@
mod ns16550a;
pub use ns16550a::NS16550a;
use crate::board::CharDeviceImpl;
use alloc::sync::Arc;
use lazy_static::*;
pub trait CharDevice {
fn read(&self) -> u8;
fn write(&self, ch: u8);
fn handle_irq(&self);
}
lazy_static! {
pub static ref UART: Arc<CharDeviceImpl> = Arc::new(CharDeviceImpl::new());
}

@ -1,175 +0,0 @@
///! Ref: https://www.lammertbies.nl/comm/info/serial-uart
///! Ref: ns16550a datasheet: https://datasheetspdf.com/pdf-file/605590/NationalSemiconductor/NS16550A/1
///! Ref: ns16450 datasheet: https://datasheetspdf.com/pdf-file/1311818/NationalSemiconductor/NS16450/1
use super::CharDevice;
use crate::sync::{Condvar, UPIntrFreeCell};
use crate::task::schedule;
use alloc::collections::VecDeque;
use bitflags::*;
use volatile::{ReadOnly, Volatile, WriteOnly};
bitflags! {
/// InterruptEnableRegister
pub struct IER: u8 {
const RX_AVAILABLE = 1 << 0;
const TX_EMPTY = 1 << 1;
}
/// LineStatusRegister
pub struct LSR: u8 {
const DATA_AVAILABLE = 1 << 0;
const THR_EMPTY = 1 << 5;
}
/// Model Control Register
pub struct MCR: u8 {
const DATA_TERMINAL_READY = 1 << 0;
const REQUEST_TO_SEND = 1 << 1;
const AUX_OUTPUT1 = 1 << 2;
const AUX_OUTPUT2 = 1 << 3;
}
}
#[repr(C)]
#[allow(dead_code)]
struct ReadWithoutDLAB {
/// receiver buffer register
pub rbr: ReadOnly<u8>,
/// interrupt enable register
pub ier: Volatile<IER>,
/// interrupt identification register
pub iir: ReadOnly<u8>,
/// line control register
pub lcr: Volatile<u8>,
/// model control register
pub mcr: Volatile<MCR>,
/// line status register
pub lsr: ReadOnly<LSR>,
/// ignore MSR
_padding1: ReadOnly<u8>,
/// ignore SCR
_padding2: ReadOnly<u8>,
}
#[repr(C)]
#[allow(dead_code)]
struct WriteWithoutDLAB {
/// transmitter holding register
pub thr: WriteOnly<u8>,
/// interrupt enable register
pub ier: Volatile<IER>,
/// ignore FCR
_padding0: ReadOnly<u8>,
/// line control register
pub lcr: Volatile<u8>,
/// modem control register
pub mcr: Volatile<MCR>,
/// line status register
pub lsr: ReadOnly<LSR>,
/// ignore other registers
_padding1: ReadOnly<u16>,
}
pub struct NS16550aRaw {
base_addr: usize,
}
impl NS16550aRaw {
fn read_end(&mut self) -> &mut ReadWithoutDLAB {
unsafe { &mut *(self.base_addr as *mut ReadWithoutDLAB) }
}
fn write_end(&mut self) -> &mut WriteWithoutDLAB {
unsafe { &mut *(self.base_addr as *mut WriteWithoutDLAB) }
}
pub fn new(base_addr: usize) -> Self {
Self { base_addr }
}
pub fn init(&mut self) {
let read_end = self.read_end();
let mut mcr = MCR::empty();
mcr |= MCR::DATA_TERMINAL_READY;
mcr |= MCR::REQUEST_TO_SEND;
mcr |= MCR::AUX_OUTPUT2;
read_end.mcr.write(mcr);
let ier = IER::RX_AVAILABLE;
read_end.ier.write(ier);
}
pub fn read(&mut self) -> Option<u8> {
let read_end = self.read_end();
let lsr = read_end.lsr.read();
if lsr.contains(LSR::DATA_AVAILABLE) {
Some(read_end.rbr.read())
} else {
None
}
}
pub fn write(&mut self, ch: u8) {
let write_end = self.write_end();
loop {
if write_end.lsr.read().contains(LSR::THR_EMPTY) {
write_end.thr.write(ch);
break;
}
}
}
}
struct NS16550aInner {
ns16550a: NS16550aRaw,
read_buffer: VecDeque<u8>,
}
pub struct NS16550a<const BASE_ADDR: usize> {
inner: UPIntrFreeCell<NS16550aInner>,
condvar: Condvar,
}
impl<const BASE_ADDR: usize> NS16550a<BASE_ADDR> {
pub fn new() -> Self {
let mut inner = NS16550aInner {
ns16550a: NS16550aRaw::new(BASE_ADDR),
read_buffer: VecDeque::new(),
};
inner.ns16550a.init();
Self {
inner: unsafe { UPIntrFreeCell::new(inner) },
condvar: Condvar::new(),
}
}
}
impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
fn read(&self) -> u8 {
loop {
let mut inner = self.inner.exclusive_access();
if let Some(ch) = inner.read_buffer.pop_front() {
return ch;
} else {
let task_cx_ptr = self.condvar.wait_no_sched();
drop(inner);
schedule(task_cx_ptr);
}
}
}
fn write(&self, ch: u8) {
let mut inner = self.inner.exclusive_access();
inner.ns16550a.write(ch);
}
fn handle_irq(&self) {
let mut count = 0;
self.inner.exclusive_session(|inner| {
while let Some(ch) = inner.ns16550a.read() {
count += 1;
inner.read_buffer.push_back(ch);
}
});
if count > 0 {
self.condvar.signal();
}
}
}

@ -1,6 +1,3 @@
pub mod block;
pub mod chardev;
pub mod plic;
pub use block::BLOCK_DEVICE;
pub use chardev::UART;

@ -1,124 +0,0 @@
#[allow(clippy::upper_case_acronyms)]
pub struct PLIC {
base_addr: usize,
}
#[derive(Copy, Clone)]
pub enum IntrTargetPriority {
Machine = 0,
Supervisor = 1,
}
impl IntrTargetPriority {
pub fn supported_number() -> usize {
2
}
}
impl PLIC {
fn priority_ptr(&self, intr_source_id: usize) -> *mut u32 {
assert!(intr_source_id > 0 && intr_source_id <= 132);
(self.base_addr + intr_source_id * 4) as *mut u32
}
fn hart_id_with_priority(hart_id: usize, target_priority: IntrTargetPriority) -> usize {
let priority_num = IntrTargetPriority::supported_number();
hart_id * priority_num + target_priority as usize
}
fn enable_ptr(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) -> (*mut u32, usize) {
let id = Self::hart_id_with_priority(hart_id, target_priority);
let (reg_id, reg_shift) = (intr_source_id / 32, intr_source_id % 32);
(
(self.base_addr + 0x2000 + 0x80 * id + 0x4 * reg_id) as *mut u32,
reg_shift,
)
}
fn threshold_ptr_of_hart_with_priority(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
) -> *mut u32 {
let id = Self::hart_id_with_priority(hart_id, target_priority);
(self.base_addr + 0x20_0000 + 0x1000 * id) as *mut u32
}
fn claim_comp_ptr_of_hart_with_priority(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
) -> *mut u32 {
let id = Self::hart_id_with_priority(hart_id, target_priority);
(self.base_addr + 0x20_0004 + 0x1000 * id) as *mut u32
}
pub unsafe fn new(base_addr: usize) -> Self {
Self { base_addr }
}
pub fn set_priority(&mut self, intr_source_id: usize, priority: u32) {
assert!(priority < 8);
unsafe {
self.priority_ptr(intr_source_id).write_volatile(priority);
}
}
#[allow(unused)]
pub fn get_priority(&mut self, intr_source_id: usize) -> u32 {
unsafe { self.priority_ptr(intr_source_id).read_volatile() & 7 }
}
pub fn enable(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) {
let (reg_ptr, shift) = self.enable_ptr(hart_id, target_priority, intr_source_id);
unsafe {
reg_ptr.write_volatile(reg_ptr.read_volatile() | 1 << shift);
}
}
#[allow(unused)]
pub fn disable(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) {
let (reg_ptr, shift) = self.enable_ptr(hart_id, target_priority, intr_source_id);
unsafe {
reg_ptr.write_volatile(reg_ptr.read_volatile() & (!(1u32 << shift)));
}
}
pub fn set_threshold(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
threshold: u32,
) {
assert!(threshold < 8);
let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe {
threshold_ptr.write_volatile(threshold);
}
}
#[allow(unused)]
pub fn get_threshold(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe { threshold_ptr.read_volatile() & 7 }
}
pub fn claim(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
let claim_comp_ptr = self.claim_comp_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe { claim_comp_ptr.read_volatile() }
}
pub fn complete(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
completion: u32,
) {
let claim_comp_ptr = self.claim_comp_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe {
claim_comp_ptr.write_volatile(completion);
}
}
}

@ -9,4 +9,4 @@ _start:
boot_stack:
.space 4096 * 16
.globl boot_stack_top
boot_stack_top:
boot_stack_top:

@ -1,7 +1,7 @@
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPIntrFreeCell;
use crate::sync::UPSafeCell;
use alloc::sync::Arc;
use alloc::vec::Vec;
use bitflags::*;
@ -11,7 +11,7 @@ use lazy_static::*;
pub struct OSInode {
readable: bool,
writable: bool,
inner: UPIntrFreeCell<OSInodeInner>,
inner: UPSafeCell<OSInodeInner>,
}
pub struct OSInodeInner {
@ -24,7 +24,7 @@ impl OSInode {
Self {
readable,
writable,
inner: unsafe { UPIntrFreeCell::new(OSInodeInner { offset: 0, inode }) },
inner: unsafe { UPSafeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
pub fn read_all(&self) -> Vec<u8> {
@ -55,7 +55,7 @@ pub fn list_apps() {
for app in ROOT_INODE.ls() {
println!("{}", app);
}
println!("**************/")
println!("**************/");
}
bitflags! {

@ -1,6 +1,6 @@
use super::File;
use crate::mm::UserBuffer;
use crate::sync::UPIntrFreeCell;
use crate::sync::UPSafeCell;
use alloc::sync::{Arc, Weak};
use crate::task::suspend_current_and_run_next;
@ -8,18 +8,18 @@ use crate::task::suspend_current_and_run_next;
pub struct Pipe {
readable: bool,
writable: bool,
buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>,
buffer: Arc<UPSafeCell<PipeRingBuffer>>,
}
impl Pipe {
pub fn read_end_with_buffer(buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>) -> Self {
pub fn read_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
Self {
readable: true,
writable: false,
buffer,
}
}
pub fn write_end_with_buffer(buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>) -> Self {
pub fn write_end_with_buffer(buffer: Arc<UPSafeCell<PipeRingBuffer>>) -> Self {
Self {
readable: false,
writable: true,
@ -98,7 +98,7 @@ impl PipeRingBuffer {
/// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(unsafe { UPIntrFreeCell::new(PipeRingBuffer::new()) });
let buffer = Arc::new(unsafe { UPSafeCell::new(PipeRingBuffer::new()) });
let read_end = Arc::new(Pipe::read_end_with_buffer(buffer.clone()));
let write_end = Arc::new(Pipe::write_end_with_buffer(buffer.clone()));
buffer.exclusive_access().set_write_end(&write_end);

@ -1,8 +1,10 @@
use super::File;
use crate::drivers::chardev::{CharDevice, UART};
use crate::mm::UserBuffer;
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
pub struct Stdin;
pub struct Stdout;
impl File for Stdin {
@ -14,8 +16,18 @@ impl File for Stdin {
}
fn read(&self, mut user_buf: UserBuffer) -> usize {
assert_eq!(user_buf.len(), 1);
//println!("before UART.read() in Stdin::read()");
let ch = UART.read();
// busy loop
let mut c: usize;
loop {
c = console_getchar();
if c == 0 {
suspend_current_and_run_next();
continue;
} else {
break;
}
}
let ch = c as u8;
unsafe {
user_buf.buffers[0].as_mut_ptr().write_volatile(ch);
}

@ -1,6 +1,4 @@
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
use core::arch::asm;
use core::panic::PanicInfo;
#[panic_handler]
@ -15,23 +13,5 @@ fn panic(info: &PanicInfo) -> ! {
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
unsafe {
backtrace();
}
shutdown(255)
}
unsafe fn backtrace() {
let mut fp: usize;
let stop = current_kstack_top();
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop {
break;
}
println!("#{}:ra={:#x}", i, *((fp - 8) as *const usize));
fp = *((fp - 16) as *const usize);
}
println!("---END BACKTRACE---");
shutdown()
}

@ -29,7 +29,9 @@ mod task;
mod timer;
mod trap;
core::arch::global_asm!(include_str!("entry.asm"));
use core::arch::global_asm;
global_asm!(include_str!("entry.asm"));
fn clear_bss() {
extern "C" {
@ -42,25 +44,17 @@ fn clear_bss() {
}
}
use lazy_static::*;
use sync::UPIntrFreeCell;
lazy_static! {
pub static ref DEV_NON_BLOCKING_ACCESS: UPIntrFreeCell<bool> =
unsafe { UPIntrFreeCell::new(false) };
}
#[no_mangle]
pub fn rust_main() -> ! {
clear_bss();
println!("[kernel] Hello, world!");
mm::init();
mm::remap_test();
trap::init();
trap::enable_timer_interrupt();
timer::set_next_trigger();
board::device_init();
fs::list_apps();
task::add_initproc();
*DEV_NON_BLOCKING_ACCESS.exclusive_access() = true;
task::run_tasks();
panic!("Unreachable in rust_main!");
}

@ -83,11 +83,7 @@ impl From<PhysPageNum> for usize {
}
impl From<VirtAddr> for usize {
fn from(v: VirtAddr) -> Self {
if v.0 >= (1 << (VA_WIDTH_SV39 - 1)) {
v.0 | (!((1 << VA_WIDTH_SV39) - 1))
} else {
v.0
}
v.0
}
}
impl From<VirtPageNum> for usize {

@ -1,6 +1,6 @@
use super::{PhysAddr, PhysPageNum};
use crate::config::MEMORY_END;
use crate::sync::UPIntrFreeCell;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
@ -83,8 +83,8 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPIntrFreeCell<FrameAllocatorImpl> =
unsafe { UPIntrFreeCell::new(FrameAllocatorImpl::new()) };
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
pub fn init_frame_allocator() {

@ -2,8 +2,8 @@ use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE};
use crate::sync::UPIntrFreeCell;
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT, USER_STACK_SIZE};
use crate::sync::UPSafeCell;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use alloc::vec::Vec;
@ -25,8 +25,8 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<UPIntrFreeCell<MemorySet>> =
Arc::new(unsafe { UPIntrFreeCell::new(MemorySet::new_kernel()) });
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
pub fn kernel_token() -> usize {
@ -163,8 +163,8 @@ impl MemorySet {
}
memory_set
}
/// Include sections in elf and trampoline,
/// also returns user_sp_base and entry point.
/// Include sections in elf and trampoline and TrapContext and user stack,
/// also returns user_sp and entry point.
pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
let mut memory_set = Self::new_bare();
// map trampoline
@ -200,12 +200,34 @@ impl MemorySet {
);
}
}
// map user stack with U flags
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
let mut user_stack_bottom: usize = max_end_va.into();
// guard page
user_stack_bottom += PAGE_SIZE;
let user_stack_top = user_stack_bottom + USER_STACK_SIZE;
memory_set.push(
MapArea::new(
user_stack_bottom.into(),
user_stack_top.into(),
MapType::Framed,
MapPermission::R | MapPermission::W | MapPermission::U,
),
None,
);
// map TrapContext
memory_set.push(
MapArea::new(
TRAP_CONTEXT.into(),
TRAMPOLINE.into(),
MapType::Framed,
MapPermission::R | MapPermission::W,
),
None,
);
(
memory_set,
user_stack_base,
user_stack_top,
elf.header.pt2.entry_point() as usize,
)
}
@ -351,20 +373,26 @@ pub fn remap_test() {
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
assert!(!kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),);
assert!(!kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),);
assert!(!kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),);
assert!(
!kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),
);
assert!(
!kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),
);
assert!(
!kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),
);
println!("remap_test passed!");
}

@ -16,7 +16,7 @@ const SBI_SHUTDOWN: usize = 8;
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let mut ret;
unsafe {
core::arch::asm!(
asm!(
"ecall",
inlateout("x10") arg0 => ret,
in("x11") arg1,
@ -39,9 +39,7 @@ pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
use crate::board::QEMUExit;
pub fn shutdown(exit_code: usize) -> ! {
//sbi_call(SBI_SHUTDOWN, exit_code, 0, 0);
crate::board::QEMU_EXIT_HANDLE.exit_failure();
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");
}

@ -1,58 +0,0 @@
use crate::sync::{Mutex, UPIntrFreeCell};
use crate::task::{
add_task, block_current_and_run_next, block_current_task, current_task, TaskContext,
TaskControlBlock,
};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Condvar {
pub inner: UPIntrFreeCell<CondvarInner>,
}
pub struct CondvarInner {
pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl Condvar {
pub fn new() -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(CondvarInner {
wait_queue: VecDeque::new(),
})
},
}
}
pub fn signal(&self) {
let mut inner = self.inner.exclusive_access();
if let Some(task) = inner.wait_queue.pop_front() {
add_task(task);
}
}
/*
pub fn wait(&self) {
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_and_run_next();
}
*/
pub fn wait_no_sched(&self) -> *mut TaskContext {
self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap());
});
block_current_task()
}
pub fn wait_with_mutex(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock();
self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap());
});
block_current_and_run_next();
mutex.lock();
}
}

@ -1,9 +1,3 @@
mod condvar;
mod mutex;
mod semaphore;
mod up;
pub use condvar::Condvar;
pub use mutex::{Mutex, MutexBlocking, MutexSpin};
pub use semaphore::Semaphore;
pub use up::{UPIntrFreeCell, UPIntrRefMut};
pub use up::UPSafeCell;

@ -1,88 +0,0 @@
use super::UPIntrFreeCell;
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use alloc::{collections::VecDeque, sync::Arc};
pub trait Mutex: Sync + Send {
fn lock(&self);
fn unlock(&self);
}
pub struct MutexSpin {
locked: UPIntrFreeCell<bool>,
}
impl MutexSpin {
pub fn new() -> Self {
Self {
locked: unsafe { UPIntrFreeCell::new(false) },
}
}
}
impl Mutex for MutexSpin {
fn lock(&self) {
loop {
let mut locked = self.locked.exclusive_access();
if *locked {
drop(locked);
suspend_current_and_run_next();
continue;
} else {
*locked = true;
return;
}
}
}
fn unlock(&self) {
let mut locked = self.locked.exclusive_access();
*locked = false;
}
}
pub struct MutexBlocking {
inner: UPIntrFreeCell<MutexBlockingInner>,
}
pub struct MutexBlockingInner {
locked: bool,
wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl MutexBlocking {
pub fn new() -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(MutexBlockingInner {
locked: false,
wait_queue: VecDeque::new(),
})
},
}
}
}
impl Mutex for MutexBlocking {
fn lock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
if mutex_inner.locked {
mutex_inner.wait_queue.push_back(current_task().unwrap());
drop(mutex_inner);
block_current_and_run_next();
} else {
mutex_inner.locked = true;
}
}
fn unlock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
assert!(mutex_inner.locked);
if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
add_task(waking_task);
} else {
mutex_inner.locked = false;
}
}
}

@ -1,45 +0,0 @@
use crate::sync::UPIntrFreeCell;
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Semaphore {
pub inner: UPIntrFreeCell<SemaphoreInner>,
}
pub struct SemaphoreInner {
pub count: isize,
pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl Semaphore {
pub fn new(res_count: usize) -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(SemaphoreInner {
count: res_count as isize,
wait_queue: VecDeque::new(),
})
},
}
}
pub fn up(&self) {
let mut inner = self.inner.exclusive_access();
inner.count += 1;
if inner.count <= 0 {
if let Some(task) = inner.wait_queue.pop_front() {
add_task(task);
}
}
}
pub fn down(&self) {
let mut inner = self.inner.exclusive_access();
inner.count -= 1;
if inner.count < 0 {
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_and_run_next();
}
}
}

@ -1,9 +1,5 @@
use core::cell::{RefCell, RefMut, UnsafeCell};
use core::ops::{Deref, DerefMut};
use lazy_static::*;
use riscv::register::sstatus;
use core::cell::{RefCell, RefMut};
/*
/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
///
@ -31,110 +27,3 @@ impl<T> UPSafeCell<T> {
self.inner.borrow_mut()
}
}
*/
pub struct UPSafeCellRaw<T> {
inner: UnsafeCell<T>,
}
unsafe impl<T> Sync for UPSafeCellRaw<T> {}
impl<T> UPSafeCellRaw<T> {
pub unsafe fn new(value: T) -> Self {
Self {
inner: UnsafeCell::new(value),
}
}
pub fn get_mut(&self) -> &mut T {
unsafe { &mut (*self.inner.get()) }
}
}
pub struct IntrMaskingInfo {
nested_level: usize,
sie_before_masking: bool,
}
lazy_static! {
static ref INTR_MASKING_INFO: UPSafeCellRaw<IntrMaskingInfo> =
unsafe { UPSafeCellRaw::new(IntrMaskingInfo::new()) };
}
impl IntrMaskingInfo {
pub fn new() -> Self {
Self {
nested_level: 0,
sie_before_masking: false,
}
}
pub fn enter(&mut self) {
let sie = sstatus::read().sie();
unsafe {
sstatus::clear_sie();
}
if self.nested_level == 0 {
self.sie_before_masking = sie;
}
self.nested_level += 1;
}
pub fn exit(&mut self) {
self.nested_level -= 1;
if self.nested_level == 0 && self.sie_before_masking {
unsafe {
sstatus::set_sie();
}
}
}
}
pub struct UPIntrFreeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPIntrFreeCell<T> {}
pub struct UPIntrRefMut<'a, T>(Option<RefMut<'a, T>>);
impl<T> UPIntrFreeCell<T> {
pub unsafe fn new(value: T) -> Self {
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> UPIntrRefMut<'_, T> {
INTR_MASKING_INFO.get_mut().enter();
UPIntrRefMut(Some(self.inner.borrow_mut()))
}
pub fn exclusive_session<F, V>(&self, f: F) -> V
where
F: FnOnce(&mut T) -> V,
{
let mut inner = self.exclusive_access();
f(inner.deref_mut())
}
}
impl<'a, T> Drop for UPIntrRefMut<'a, T> {
fn drop(&mut self) {
self.0 = None;
INTR_MASKING_INFO.get_mut().exit();
}
}
impl<'a, T> Deref for UPIntrRefMut<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.0.as_ref().unwrap().deref()
}
}
impl<'a, T> DerefMut for UPIntrRefMut<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.0.as_mut().unwrap().deref_mut()
}
}

@ -1,12 +1,12 @@
use crate::fs::{make_pipe, open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_refmut, translated_str, UserBuffer};
use crate::task::{current_process, current_user_token};
use crate::task::{current_task, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let process = current_process();
let inner = process.inner_exclusive_access();
let task = current_task().unwrap();
let inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -25,8 +25,8 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let process = current_process();
let inner = process.inner_exclusive_access();
let task = current_task().unwrap();
let inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -44,11 +44,11 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
pub fn sys_open(path: *const u8, flags: u32) -> isize {
let process = current_process();
let task = current_task().unwrap();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) {
let mut inner = process.inner_exclusive_access();
let mut inner = task.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);
fd as isize
@ -58,8 +58,8 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
}
pub fn sys_close(fd: usize) -> isize {
let process = current_process();
let mut inner = process.inner_exclusive_access();
let task = current_task().unwrap();
let mut inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -71,9 +71,9 @@ pub fn sys_close(fd: usize) -> isize {
}
pub fn sys_pipe(pipe: *mut usize) -> isize {
let process = current_process();
let task = current_task().unwrap();
let token = current_user_token();
let mut inner = process.inner_exclusive_access();
let mut inner = task.inner_exclusive_access();
let (pipe_read, pipe_write) = make_pipe();
let read_fd = inner.alloc_fd();
inner.fd_table[read_fd] = Some(pipe_read);
@ -85,8 +85,8 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
}
pub fn sys_dup(fd: usize) -> isize {
let process = current_process();
let mut inner = process.inner_exclusive_access();
let task = current_task().unwrap();
let mut inner = task.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}

@ -5,36 +5,24 @@ const SYSCALL_PIPE: usize = 59;
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
const SYSCALL_SLEEP: usize = 101;
const SYSCALL_YIELD: usize = 124;
const SYSCALL_KILL: usize = 129;
const SYSCALL_SIGACTION: usize = 134;
const SYSCALL_SIGPROCMASK: usize = 135;
const SYSCALL_SIGRETURN: usize = 139;
const SYSCALL_GET_TIME: usize = 169;
const SYSCALL_GETPID: usize = 172;
const SYSCALL_FORK: usize = 220;
const SYSCALL_EXEC: usize = 221;
const SYSCALL_WAITPID: usize = 260;
const SYSCALL_THREAD_CREATE: usize = 1000;
const SYSCALL_GETTID: usize = 1001;
const SYSCALL_WAITTID: usize = 1002;
const SYSCALL_MUTEX_CREATE: usize = 1010;
const SYSCALL_MUTEX_LOCK: usize = 1011;
const SYSCALL_MUTEX_UNLOCK: usize = 1012;
const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
const SYSCALL_SEMAPHORE_UP: usize = 1021;
const SYSCALL_SEMAPHORE_DOWN: usize = 1022;
const SYSCALL_CONDVAR_CREATE: usize = 1030;
const SYSCALL_CONDVAR_SIGNAL: usize = 1031;
const SYSCALL_CONDVAR_WAIT: usize = 1032;
mod fs;
mod process;
mod sync;
mod thread;
use fs::*;
use process::*;
use sync::*;
use thread::*;
use crate::task::SignalAction;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
@ -45,26 +33,16 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
SYSCALL_EXIT => sys_exit(args[0] as i32),
SYSCALL_SLEEP => sys_sleep(args[0]),
SYSCALL_YIELD => sys_yield(),
SYSCALL_KILL => sys_kill(args[0], args[1] as u32),
SYSCALL_KILL => sys_kill(args[0], args[1] as i32),
SYSCALL_SIGACTION => sys_sigaction(args[0] as i32, args[1] as *const SignalAction, args[2] as *mut SignalAction),
SYSCALL_SIGPROCMASK => sys_sigprocmask(args[0] as u32),
SYSCALL_SIGRETURN => sys_sigretrun(),
SYSCALL_GET_TIME => sys_get_time(),
SYSCALL_GETPID => sys_getpid(),
SYSCALL_FORK => sys_fork(),
SYSCALL_EXEC => sys_exec(args[0] as *const u8, args[1] as *const usize),
SYSCALL_WAITPID => sys_waitpid(args[0] as isize, args[1] as *mut i32),
SYSCALL_THREAD_CREATE => sys_thread_create(args[0], args[1]),
SYSCALL_GETTID => sys_gettid(),
SYSCALL_WAITTID => sys_waittid(args[0]) as isize,
SYSCALL_MUTEX_CREATE => sys_mutex_create(args[0] == 1),
SYSCALL_MUTEX_LOCK => sys_mutex_lock(args[0]),
SYSCALL_MUTEX_UNLOCK => sys_mutex_unlock(args[0]),
SYSCALL_SEMAPHORE_CREATE => sys_semaphore_create(args[0]),
SYSCALL_SEMAPHORE_UP => sys_semaphore_up(args[0]),
SYSCALL_SEMAPHORE_DOWN => sys_semaphore_down(args[0]),
SYSCALL_CONDVAR_CREATE => sys_condvar_create(args[0]),
SYSCALL_CONDVAR_SIGNAL => sys_condvar_signal(args[0]),
SYSCALL_CONDVAR_WAIT => sys_condvar_wait(args[0], args[1]),
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}

@ -1,8 +1,8 @@
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_ref, translated_refmut, translated_str};
use crate::task::{
current_process, current_task, current_user_token, exit_current_and_run_next, pid2process,
suspend_current_and_run_next, SignalFlags,
add_task, current_task, current_user_token, exit_current_and_run_next, pid2task,
suspend_current_and_run_next, SignalFlags, SignalAction, MAX_SIG,
};
use crate::timer::get_time_ms;
use alloc::string::String;
@ -24,20 +24,20 @@ pub fn sys_get_time() -> isize {
}
pub fn sys_getpid() -> isize {
current_task().unwrap().process.upgrade().unwrap().getpid() as isize
current_task().unwrap().pid.0 as isize
}
pub fn sys_fork() -> isize {
let current_process = current_process();
let new_process = current_process.fork();
let new_pid = new_process.getpid();
let current_task = current_task().unwrap();
let new_task = current_task.fork();
let new_pid = new_task.pid.0;
// modify trap context of new_task, because it returns immediately after switching
let new_process_inner = new_process.inner_exclusive_access();
let task = new_process_inner.tasks[0].as_ref().unwrap();
let trap_cx = task.inner_exclusive_access().get_trap_cx();
let trap_cx = new_task.inner_exclusive_access().get_trap_cx();
// we do not have to move to next instruction since we have done it before
// for child process, fork returns 0
trap_cx.x[10] = 0;
// add new task to scheduler
add_task(new_task);
new_pid as isize
}
@ -57,9 +57,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
let process = current_process();
let task = current_task().unwrap();
let argc = args_vec.len();
process.exec(all_data.as_slice(), args_vec);
task.exec(all_data.as_slice(), args_vec);
// return argc because cx.x[10] will be covered with it later
argc as isize
} else {
@ -70,10 +70,11 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
/// If there is not a child process whose pid is same as given, return -1.
/// Else if there is a child process but it is still running, return -2.
pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
let process = current_process();
let task = current_task().unwrap();
// find a child process
let mut inner = process.inner_exclusive_access();
// ---- access current PCB exclusively
let mut inner = task.inner_exclusive_access();
if !inner
.children
.iter()
@ -84,7 +85,7 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
}
let pair = inner.children.iter().enumerate().find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
p.inner_exclusive_access().is_zombie() && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
@ -103,10 +104,15 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
// ---- release current PCB automatically
}
pub fn sys_kill(pid: usize, signal: u32) -> isize {
if let Some(process) = pid2process(pid) {
if let Some(flag) = SignalFlags::from_bits(signal) {
process.inner_exclusive_access().signals |= flag;
pub fn sys_kill(pid: usize, signum: i32) -> isize {
if let Some(task) = pid2task(pid) {
if let Some(flag) = SignalFlags::from_bits(1 << signum) {
// insert the signal if legal
let mut task_ref = task.inner_exclusive_access();
if task_ref.signals.contains(flag) {
return -1;
}
task_ref.signals.insert(flag);
0
} else {
-1
@ -115,3 +121,66 @@ pub fn sys_kill(pid: usize, signal: u32) -> isize {
-1
}
}
pub fn sys_sigprocmask(mask: u32) -> isize {
if let Some(task) = current_task() {
let mut inner = task.inner_exclusive_access();
let old_mask = inner.signal_mask;
if let Some(flag) = SignalFlags::from_bits(mask) {
inner.signal_mask = flag;
old_mask.bits() as isize
} else {
-1
}
} else {
-1
}
}
pub fn sys_sigretrun() -> isize {
if let Some(task) = current_task() {
let mut inner = task.inner_exclusive_access();
inner.handling_sig = -1;
// restore the trap context
let trap_ctx = inner.get_trap_cx();
*trap_ctx = inner.trap_ctx_backup.unwrap();
0
} else {
-1
}
}
fn check_sigaction_error(signal: SignalFlags, action: usize, old_action: usize) -> bool {
if action == 0 || old_action == 0 || signal == SignalFlags::SIGKILL ||
signal == SignalFlags::SIGSTOP {
true
} else {
false
}
}
pub fn sys_sigaction(signum: i32, action: *const SignalAction, old_action: *mut SignalAction) -> isize {
let token = current_user_token();
if let Some(task) = current_task() {
let mut inner = task.inner_exclusive_access();
if signum as usize > MAX_SIG {
return -1;
}
if let Some(flag) = SignalFlags::from_bits(1 << signum) {
if check_sigaction_error(flag, action as usize, old_action as usize) {
return -1;
}
let old_kernel_action = inner.signal_actions.table[signum as usize];
if old_kernel_action.mask != SignalFlags::from_bits(40).unwrap() {
*translated_refmut(token, old_action) = old_kernel_action;
} else {
let mut ref_old_action = *translated_refmut(token, old_action);
ref_old_action.handler = old_kernel_action.handler;
}
let ref_action = translated_ref(token, action);
inner.signal_actions.table[signum as usize] = *ref_action;
return 0;
}
}
-1
}
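For orientation, the signal syscalls added above (sigaction, sigprocmask, sigreturn, and the reworked kill) are meant to be driven from the user library added in commit 7a97faee6c ("Add signal user lib"), which is not part of this diff. The following is a hypothetical user-side sketch only: the `syscall` helper and wrapper names are assumptions, the syscall IDs come from os/src/syscall/mod.rs above, and the register convention mirrors the `ecall` usage shown in os/src/sbi.rs (a0-a2 for arguments, a7 for the ID):

```rust
// Hypothetical user-side wrappers; the IDs match os/src/syscall/mod.rs in this diff.
use core::arch::asm;

const SYSCALL_KILL: usize = 129;
const SYSCALL_SIGACTION: usize = 134;
const SYSCALL_SIGPROCMASK: usize = 135;
const SYSCALL_SIGRETURN: usize = 139;

#[repr(C)]
#[derive(Clone, Copy)]
pub struct SignalAction {
    pub handler: usize, // address of the user-space handler function
    pub mask: u32,      // kernel-side type is SignalFlags (bitflags over u32); u32 assumed here
}

// Generic syscall stub: a0-a2 carry arguments, a7 carries the syscall ID.
fn syscall(id: usize, args: [usize; 3]) -> isize {
    let mut ret: isize;
    unsafe {
        asm!(
            "ecall",
            inlateout("x10") args[0] => ret,
            in("x11") args[1],
            in("x12") args[2],
            in("x17") id,
        );
    }
    ret
}

pub fn sigaction(signum: i32, action: *const SignalAction, old_action: *mut SignalAction) -> isize {
    syscall(SYSCALL_SIGACTION, [signum as usize, action as usize, old_action as usize])
}

pub fn sigprocmask(mask: u32) -> isize {
    syscall(SYSCALL_SIGPROCMASK, [mask as usize, 0, 0])
}

pub fn sigreturn() -> isize {
    syscall(SYSCALL_SIGRETURN, [0, 0, 0])
}

pub fn kill(pid: usize, signum: i32) -> isize {
    syscall(SYSCALL_KILL, [pid, signum as usize, 0])
}
```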

@ -1,134 +0,0 @@
use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
use crate::task::{block_current_and_run_next, current_process, current_task};
use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
pub fn sys_sleep(ms: usize) -> isize {
let expire_ms = get_time_ms() + ms;
let task = current_task().unwrap();
add_timer(expire_ms, task);
block_current_and_run_next();
0
}
pub fn sys_mutex_create(blocking: bool) -> isize {
let process = current_process();
let mutex: Option<Arc<dyn Mutex>> = if !blocking {
Some(Arc::new(MutexSpin::new()))
} else {
Some(Arc::new(MutexBlocking::new()))
};
let mut process_inner = process.inner_exclusive_access();
if let Some(id) = process_inner
.mutex_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.mutex_list[id] = mutex;
id as isize
} else {
process_inner.mutex_list.push(mutex);
process_inner.mutex_list.len() as isize - 1
}
}
pub fn sys_mutex_lock(mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
drop(process);
mutex.lock();
0
}
pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
drop(process);
mutex.unlock();
0
}
pub fn sys_semaphore_create(res_count: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
.semaphore_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
id
} else {
process_inner
.semaphore_list
.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner.semaphore_list.len() - 1
};
id as isize
}
pub fn sys_semaphore_up(sem_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
drop(process_inner);
sem.up();
0
}
pub fn sys_semaphore_down(sem_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
drop(process_inner);
sem.down();
0
}
pub fn sys_condvar_create(_arg: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
.condvar_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
id
} else {
process_inner
.condvar_list
.push(Some(Arc::new(Condvar::new())));
process_inner.condvar_list.len() - 1
};
id as isize
}
pub fn sys_condvar_signal(condvar_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
drop(process_inner);
condvar.signal();
0
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
condvar.wait_with_mutex(mutex);
0
}

@ -1,85 +0,0 @@
use crate::{
mm::kernel_token,
task::{add_task, current_task, TaskControlBlock},
trap::{trap_handler, TrapContext},
};
use alloc::sync::Arc;
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
// create a new thread
let new_task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
task.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base,
true,
));
// add new task to scheduler
add_task(Arc::clone(&new_task));
let new_task_inner = new_task.inner_exclusive_access();
let new_task_res = new_task_inner.res.as_ref().unwrap();
let new_task_tid = new_task_res.tid;
let mut process_inner = process.inner_exclusive_access();
// add new thread to current process
let tasks = &mut process_inner.tasks;
while tasks.len() < new_task_tid + 1 {
tasks.push(None);
}
tasks[new_task_tid] = Some(Arc::clone(&new_task));
let new_task_trap_cx = new_task_inner.get_trap_cx();
*new_task_trap_cx = TrapContext::app_init_context(
entry,
new_task_res.ustack_top(),
kernel_token(),
new_task.kstack.get_top(),
trap_handler as usize,
);
(*new_task_trap_cx).x[10] = arg;
new_task_tid as isize
}
pub fn sys_gettid() -> isize {
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.tid as isize
}
/// thread does not exist, return -1
/// thread has not exited yet, return -2
/// otherwise, return thread's exit code
pub fn sys_waittid(tid: usize) -> i32 {
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
let task_inner = task.inner_exclusive_access();
let mut process_inner = process.inner_exclusive_access();
// a thread cannot wait for itself
if task_inner.res.as_ref().unwrap().tid == tid {
return -1;
}
let mut exit_code: Option<i32> = None;
let waited_task = process_inner.tasks[tid].as_ref();
if let Some(waited_task) = waited_task {
if let Some(waited_exit_code) = waited_task.inner_exclusive_access().exit_code {
exit_code = Some(waited_exit_code);
}
} else {
// waited thread does not exist
return -1;
}
if let Some(exit_code) = exit_code {
// dealloc the exited thread
process_inner.tasks[tid] = None;
exit_code
} else {
// waited thread has not exited
-2
}
}
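A user-side sketch of the contract documented above, using the thread_create/waittid wrappers that appear in the test programs below; some_entry is a placeholder for a real thread entry function:
// user-side sketch
let tid = thread_create(some_entry as usize, 0) as usize;
// -1: no such thread (or waiting on oneself), -2: still running, otherwise the exit code
let exit_code = waittid(tid);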

@ -0,0 +1,32 @@
use crate::task::{SignalFlags, MAX_SIG};
/// Action for a signal
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct SignalAction {
pub handler: usize,
pub mask: SignalFlags
}
impl Default for SignalAction {
fn default() -> Self {
Self {
handler: 0,
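// 40 == (1 << 5) | (1 << 3) == SIGTRAP | SIGQUIT (see SignalFlags)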
mask: SignalFlags::from_bits(40).unwrap()
}
}
}
#[derive(Clone)]
pub struct SignalActions {
pub table: [SignalAction; MAX_SIG + 1],
}
impl Default for SignalActions {
fn default() -> Self {
Self {
table: [SignalAction::default(); MAX_SIG + 1],
}
}
}

@ -1,225 +0,0 @@
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPIntrFreeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
pub struct RecycleAllocator {
current: usize,
recycled: Vec<usize>,
}
impl RecycleAllocator {
pub fn new() -> Self {
RecycleAllocator {
current: 0,
recycled: Vec::new(),
}
}
pub fn alloc(&mut self) -> usize {
if let Some(id) = self.recycled.pop() {
id
} else {
self.current += 1;
self.current - 1
}
}
pub fn dealloc(&mut self, id: usize) {
assert!(id < self.current);
assert!(
!self.recycled.iter().any(|i| *i == id),
"id {} has been deallocated!",
id
);
self.recycled.push(id);
}
}
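A minimal host-side sketch of the alloc/dealloc contract, just to illustrate that recycled ids are handed out before fresh ones:
let mut allocator = RecycleAllocator::new();
let a = allocator.alloc(); // 0
let b = allocator.alloc(); // 1
allocator.dealloc(a);
assert_eq!(allocator.alloc(), 0); // the recycled id comes back first
assert_eq!(allocator.alloc(), 2); // then a fresh one
let _ = b;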
lazy_static! {
static ref PID_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
}
pub const IDLE_PID: usize = 0;
pub struct PidHandle(pub usize);
pub fn pid_alloc() -> PidHandle {
PidHandle(PID_ALLOCATOR.exclusive_access().alloc())
}
impl Drop for PidHandle {
fn drop(&mut self) {
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
}
}
/// Return (bottom, top) of a kernel stack in kernel space.
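/// Stacks are laid out top-down from TRAMPOLINE; each slot takes KERNEL_STACK_SIZE bytes plus a PAGE_SIZE gap that is left unmapped as a guard page.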
pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack(kstack_id)
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.exclusive_access()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}
impl KernelStack {
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.0);
kernel_stack_top
}
}
pub struct TaskUserRes {
pub tid: usize,
pub ustack_base: usize,
pub process: Weak<ProcessControlBlock>,
}
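// per-thread address layout: trap contexts descend one page at a time from TRAP_CONTEXT_BASE,
// while user stacks ascend from ustack_base with a PAGE_SIZE guard gap between adjacent stacks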
fn trap_cx_bottom_from_tid(tid: usize) -> usize {
TRAP_CONTEXT_BASE - tid * PAGE_SIZE
}
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}
impl TaskUserRes {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let tid = process.inner_exclusive_access().alloc_tid();
let task_user_res = Self {
tid,
ustack_base,
process: Arc::downgrade(&process),
};
if alloc_user_res {
task_user_res.alloc_user_res();
}
task_user_res
}
pub fn alloc_user_res(&self) {
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner.memory_set.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner.memory_set.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
fn dealloc_user_res(&self) {
// dealloc tid
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
#[allow(unused)]
pub fn alloc_tid(&mut self) {
self.tid = self
.process
.upgrade()
.unwrap()
.inner_exclusive_access()
.alloc_tid();
}
pub fn dealloc_tid(&self) {
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
process_inner.dealloc_tid(self.tid);
}
pub fn trap_cx_user_va(&self) -> usize {
trap_cx_bottom_from_tid(self.tid)
}
pub fn trap_cx_ppn(&self) -> PhysPageNum {
let process = self.process.upgrade().unwrap();
let process_inner = process.inner_exclusive_access();
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner
.memory_set
.translate(trap_cx_bottom_va.into())
.unwrap()
.ppn()
}
pub fn ustack_base(&self) -> usize {
self.ustack_base
}
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
}
impl Drop for TaskUserRes {
fn drop(&mut self) {
self.dealloc_tid();
self.dealloc_user_res();
}
}

@ -1,5 +1,5 @@
use super::{ProcessControlBlock, TaskControlBlock};
use crate::sync::UPIntrFreeCell;
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::sync::Arc;
use lazy_static::*;
@ -24,13 +24,16 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: UPIntrFreeCell<TaskManager> =
unsafe { UPIntrFreeCell::new(TaskManager::new()) };
pub static ref PID2PCB: UPIntrFreeCell<BTreeMap<usize, Arc<ProcessControlBlock>>> =
unsafe { UPIntrFreeCell::new(BTreeMap::new()) };
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
pub static ref PID2TCB: UPSafeCell<BTreeMap<usize, Arc<TaskControlBlock>>> =
unsafe { UPSafeCell::new(BTreeMap::new()) };
}
pub fn add_task(task: Arc<TaskControlBlock>) {
PID2TCB
.exclusive_access()
.insert(task.getpid(), Arc::clone(&task));
TASK_MANAGER.exclusive_access().add(task);
}
@ -38,17 +41,13 @@ pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.exclusive_access().fetch()
}
pub fn pid2process(pid: usize) -> Option<Arc<ProcessControlBlock>> {
let map = PID2PCB.exclusive_access();
pub fn pid2task(pid: usize) -> Option<Arc<TaskControlBlock>> {
let map = PID2TCB.exclusive_access();
map.get(&pid).map(Arc::clone)
}
pub fn insert_into_pid2process(pid: usize, process: Arc<ProcessControlBlock>) {
PID2PCB.exclusive_access().insert(pid, process);
}
pub fn remove_from_pid2process(pid: usize) {
let mut map = PID2PCB.exclusive_access();
pub fn remove_from_pid2task(pid: usize) {
let mut map = PID2TCB.exclusive_access();
if map.remove(&pid).is_none() {
panic!("cannot find pid {} in pid2task!", pid);
}

@ -1,30 +1,30 @@
mod context;
mod id;
mod manager;
mod process;
mod pid;
mod processor;
mod signal;
mod switch;
mod action;
#[allow(clippy::module_inception)]
mod task;
use self::id::TaskUserRes;
use crate::fs::{open_file, OpenFlags};
use alloc::{sync::Arc, vec::Vec};
use alloc::sync::Arc;
pub use context::TaskContext;
use lazy_static::*;
use manager::fetch_task;
use process::ProcessControlBlock;
use manager::remove_from_pid2task;
use switch::__switch;
use task::{TaskControlBlock, TaskStatus};
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle, IDLE_PID};
pub use manager::{add_task, pid2process, remove_from_pid2process};
pub use manager::{add_task, pid2task};
pub use pid::{pid_alloc, KernelStack, PidHandle};
pub use processor::{
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
current_task, current_trap_cx, current_user_token, run_tasks, schedule, take_current_task,
};
pub use signal::SignalFlags;
pub use task::{TaskControlBlock, TaskStatus};
pub use signal::{SignalFlags, MAX_SIG};
pub use action::{SignalAction, SignalActions};
pub fn suspend_current_and_run_next() {
// There must be an application running.
@ -36,7 +36,7 @@ pub fn suspend_current_and_run_next() {
// Change status to Ready
task_inner.task_status = TaskStatus::Ready;
drop(task_inner);
// ---- release current TCB
// ---- release current PCB
// push back to ready queue.
add_task(task);
@ -44,116 +44,159 @@ pub fn suspend_current_and_run_next() {
schedule(task_cx_ptr);
}
/// This function must be followed by a schedule
pub fn block_current_task() -> *mut TaskContext {
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
task_inner.task_status = TaskStatus::Blocking;
&mut task_inner.task_cx as *mut TaskContext
}
pub fn block_current_and_run_next() {
let task_cx_ptr = block_current_task();
schedule(task_cx_ptr);
}
use crate::board::QEMUExit;
pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
let process = task.process.upgrade().unwrap();
let tid = task_inner.res.as_ref().unwrap().tid;
// record exit code
task_inner.exit_code = Some(exit_code);
task_inner.res = None;
// here we do not remove the thread since we are still using the kstack
// it will be deallocated when sys_waittid is called
drop(task_inner);
drop(task);
// however, if this is the main thread of current process
// the process should terminate at once
if tid == 0 {
let pid = process.getpid();
if pid == IDLE_PID {
println!(
"[kernel] Idle process exit with exit_code {} ...",
exit_code
);
if exit_code != 0 {
//crate::sbi::shutdown(255); //255 == -1 for err hint
crate::board::QEMU_EXIT_HANDLE.exit_failure();
} else {
//crate::sbi::shutdown(0); //0 for success hint
crate::board::QEMU_EXIT_HANDLE.exit_success();
}
}
remove_from_pid2process(pid);
let mut process_inner = process.inner_exclusive_access();
// mark this process as a zombie process
process_inner.is_zombie = true;
// record exit code of main process
process_inner.exit_code = exit_code;
{
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
// remove from pid2task
remove_from_pid2task(task.getpid());
// **** access current TCB exclusively
let mut inner = task.inner_exclusive_access();
// Change status to Zombie
inner.task_status = TaskStatus::Zombie;
// Record exit code
inner.exit_code = exit_code;
// do not move children to this task's parent; re-parent them under initproc
// ++++++ access initproc TCB exclusively
{
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
// deallocate user res (including tid/trap_cx/ustack) of all threads
// it has to be done before we dealloc the whole memory_set
// otherwise they will be deallocated twice
let mut recycle_res = Vec::<TaskUserRes>::new();
for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
let task = task.as_ref().unwrap();
let mut task_inner = task.inner_exclusive_access();
if let Some(res) = task_inner.res.take() {
recycle_res.push(res);
}
}
// dealloc_tid and dealloc_user_res require access to PCB inner, so we
// collect the TaskUserRes of all threads first and release process_inner
// before dropping them, to avoid a deadlock / double-borrow problem.
drop(process_inner);
recycle_res.clear();
let mut process_inner = process.inner_exclusive_access();
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
process_inner.fd_table.clear();
}
drop(process);
// ++++++ release parent PCB
inner.children.clear();
// deallocate user space
inner.memory_set.recycle_data_pages();
// drop file descriptors
inner.fd_table.clear();
drop(inner);
// **** release current PCB
// drop task manually to maintain rc correctly
drop(task);
// we do not have to save task context
let mut _unused = TaskContext::zero_init();
schedule(&mut _unused as *mut _);
}
lazy_static! {
pub static ref INITPROC: Arc<ProcessControlBlock> = {
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new({
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all();
ProcessControlBlock::new(v.as_slice())
};
TaskControlBlock::new(v.as_slice())
});
}
pub fn add_initproc() {
let _initproc = INITPROC.clone();
add_task(INITPROC.clone());
}
pub fn check_signals_of_current() -> Option<(i32, &'static str)> {
let process = current_process();
let process_inner = process.inner_exclusive_access();
process_inner.signals.check_error()
pub fn check_signals_error_of_current() -> Option<(i32, &'static str)> {
let task = current_task().unwrap();
let task_inner = task.inner_exclusive_access();
task_inner.signals.check_error()
}
pub fn current_add_signal(signal: SignalFlags) {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
process_inner.signals |= signal;
let task = current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
task_inner.signals |= signal;
}
fn call_kernel_signal_handler(signal: SignalFlags) {
let task = current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
match signal {
SignalFlags::SIGSTOP => {
task_inner.frozen = true;
task_inner.signals ^= SignalFlags::SIGSTOP;
}
SignalFlags::SIGCONT => {
if task_inner.signals.contains(SignalFlags::SIGCONT) {
task_inner.signals ^= SignalFlags::SIGCONT;
task_inner.frozen = false;
}
}
_ => {
task_inner.killed = true;
}
}
}
fn call_user_signal_handler(sig: usize, signal: SignalFlags) {
let task = current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
let handler = task_inner.signal_actions.table[sig].handler;
// change current mask
task_inner.signal_mask = task_inner.signal_actions.table[sig].mask;
// handle flag
task_inner.handling_sig = sig as isize;
task_inner.signals ^= signal;
// backup trapframe
let mut trap_ctx = task_inner.get_trap_cx();
task_inner.trap_ctx_backup = Some(*trap_ctx);
// modify trapframe
trap_ctx.sepc = handler;
// put args (a0)
trap_ctx.x[10] = sig;
}
fn check_pending_signals() {
for sig in 0..(MAX_SIG + 1) {
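// walk signal numbers 0..=MAX_SIG; kernel-handled signals are processed on the spot,
// while the first deliverable user-handled signal sets up its handler and stops the scan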
let task = current_task().unwrap();
let task_inner = task.inner_exclusive_access();
let signal = SignalFlags::from_bits(1 << sig).unwrap();
if task_inner.signals.contains(signal) && (!task_inner.signal_mask.contains(signal)) {
if task_inner.handling_sig == -1 {
drop(task_inner);
drop(task);
if signal == SignalFlags::SIGKILL || signal == SignalFlags::SIGSTOP ||
signal == SignalFlags::SIGCONT || signal == SignalFlags::SIGDEF {
// signal is a kernel signal
call_kernel_signal_handler(signal);
} else {
// signal is a user signal
call_user_signal_handler(sig, signal);
return;
}
} else {
if !task_inner.signal_actions.table[task_inner.handling_sig as usize].mask.contains(signal) {
drop(task_inner);
drop(task);
if signal == SignalFlags::SIGKILL || signal == SignalFlags::SIGSTOP ||
signal == SignalFlags::SIGCONT || signal == SignalFlags::SIGDEF {
// signal is a kernel signal
call_kernel_signal_handler(signal);
} else {
// signal is a user signal
call_user_signal_handler(sig, signal);
return;
}
}
}
}
}
}
pub fn handle_signals() {
check_pending_signals();
loop {
let task = current_task().unwrap();
let task_inner = task.inner_exclusive_access();
let frozen_flag = task_inner.frozen;
let killed_flag = task_inner.killed;
drop(task_inner);
drop(task);
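// keep looping while the task is frozen by SIGSTOP and has not been killed,
// re-checking pending signals (e.g. SIGCONT) each round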
if (!frozen_flag) || killed_flag {
break;
}
check_pending_signals();
suspend_current_and_run_next()
}
}

@ -0,0 +1,104 @@
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE};
use crate::mm::{MapPermission, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
struct PidAllocator {
current: usize,
recycled: Vec<usize>,
}
impl PidAllocator {
pub fn new() -> Self {
PidAllocator {
current: 0,
recycled: Vec::new(),
}
}
pub fn alloc(&mut self) -> PidHandle {
if let Some(pid) = self.recycled.pop() {
PidHandle(pid)
} else {
self.current += 1;
PidHandle(self.current - 1)
}
}
pub fn dealloc(&mut self, pid: usize) {
assert!(pid < self.current);
assert!(
!self.recycled.iter().any(|ppid| *ppid == pid),
"pid {} has been deallocated!",
pid
);
self.recycled.push(pid);
}
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<PidAllocator> =
unsafe { UPSafeCell::new(PidAllocator::new()) };
}
pub struct PidHandle(pub usize);
impl Drop for PidHandle {
fn drop(&mut self) {
//println!("drop pid {}", self.0);
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
}
}
pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.exclusive_access().alloc()
}
/// Return (bottom, top) of a kernel stack in kernel space.
pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
let top = TRAMPOLINE - app_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
pub struct KernelStack {
pid: usize,
}
impl KernelStack {
pub fn new(pid_handle: &PidHandle) -> Self {
let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kernel_stack_bottom.into(),
kernel_stack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack { pid: pid_handle.0 }
}
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
kernel_stack_top
}
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.exclusive_access()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}

@ -1,258 +0,0 @@
use super::id::RecycleAllocator;
use super::manager::insert_into_pid2process;
use super::TaskControlBlock;
use super::{add_task, SignalFlags};
use super::{pid_alloc, PidHandle};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPIntrFreeCell, UPIntrRefMut};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
pub struct ProcessControlBlock {
// immutable
pub pid: PidHandle,
// mutable
inner: UPIntrFreeCell<ProcessControlBlockInner>,
}
pub struct ProcessControlBlockInner {
pub is_zombie: bool,
pub memory_set: MemorySet,
pub parent: Option<Weak<ProcessControlBlock>>,
pub children: Vec<Arc<ProcessControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub signals: SignalFlags,
pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
pub task_res_allocator: RecycleAllocator,
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
#[allow(unused)]
pub fn get_user_token(&self) -> usize {
self.memory_set.token()
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
self.fd_table.len() - 1
}
}
pub fn alloc_tid(&mut self) -> usize {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize) {
self.task_res_allocator.dealloc(tid)
}
pub fn thread_count(&self) -> usize {
self.tasks.len()
}
pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
self.tasks[tid].as_ref().unwrap().clone()
}
}
impl ProcessControlBlock {
pub fn inner_exclusive_access(&self) -> UPIntrRefMut<'_, ProcessControlBlockInner> {
self.inner.exclusive_access()
}
pub fn new(elf_data: &[u8]) -> Arc<Self> {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
// allocate a pid
let pid_handle = pid_alloc();
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe {
UPIntrFreeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
ustack_base,
true,
));
// prepare trap_cx of main thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
let ustack_top = task_inner.res.as_ref().unwrap().ustack_top();
let kstack_top = task.kstack.get_top();
drop(task_inner);
*trap_cx = TrapContext::app_init_context(
entry_point,
ustack_top,
KERNEL_SPACE.exclusive_access().token(),
kstack_top,
trap_handler as usize,
);
// add main thread to the process
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
insert_into_pid2process(process.getpid(), Arc::clone(&process));
// add main thread to scheduler
add_task(task);
process
}
/// Only support processes with a single thread.
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let new_token = memory_set.token();
// substitute memory_set
self.inner_exclusive_access().memory_set = memory_set;
// then we alloc user resource for main thread again
// since memory_set has been changed
let task = self.inner_exclusive_access().get_task(0);
let mut task_inner = task.inner_exclusive_access();
task_inner.res.as_mut().unwrap().ustack_base = ustack_base;
task_inner.res.as_mut().unwrap().alloc_user_res();
task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn();
// push arguments on user stack
let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top();
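// reserve argv slots on the user stack: args.len() pointers plus a terminating null pointer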
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp;
let mut argv: Vec<_> = (0..=args.len())
.map(|arg| {
translated_refmut(
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
*argv[args.len()] = 0;
for i in 0..args.len() {
user_sp -= args[i].len() + 1;
*argv[i] = user_sp;
let mut p = user_sp;
for c in args[i].as_bytes() {
*translated_refmut(new_token, p as *mut u8) = *c;
p += 1;
}
*translated_refmut(new_token, p as *mut u8) = 0;
}
// make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>();
// initialize trap_cx
let mut trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.exclusive_access().token(),
task.kstack.get_top(),
trap_handler as usize,
);
trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base;
*task_inner.get_trap_cx() = trap_cx;
}
/// Only support processes with a single thread.
pub fn fork(self: &Arc<Self>) -> Arc<Self> {
let mut parent = self.inner_exclusive_access();
assert_eq!(parent.thread_count(), 1);
// clone parent's memory_set completely including trampoline/ustacks/trap_cxs
let memory_set = MemorySet::from_existed_user(&parent.memory_set);
// alloc a pid
let pid = pid_alloc();
// copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent.fd_table.iter() {
if let Some(file) = fd {
new_fd_table.push(Some(file.clone()));
} else {
new_fd_table.push(None);
}
}
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe {
UPIntrFreeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// add child
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent
.get_task(0)
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base(),
// here we do not allocate trap_cx or ustack again,
// but note that a new kstack is allocated here
false,
));
// attach task to child process
let mut child_inner = child.inner_exclusive_access();
child_inner.tasks.push(Some(Arc::clone(&task)));
drop(child_inner);
// modify kstack_top in trap_cx of this thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task.kstack.get_top();
drop(task_inner);
insert_into_pid2process(child.getpid(), Arc::clone(&child));
// add this thread to scheduler
add_task(task);
child
}
pub fn getpid(&self) -> usize {
self.pid.0
}
}

@ -1,7 +1,7 @@
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{ProcessControlBlock, TaskContext, TaskControlBlock};
use crate::sync::UPIntrFreeCell;
use super::{TaskContext, TaskControlBlock};
use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
@ -30,8 +30,7 @@ impl Processor {
}
lazy_static! {
pub static ref PROCESSOR: UPIntrFreeCell<Processor> =
unsafe { UPIntrFreeCell::new(Processor::new()) };
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn run_tasks() {
@ -40,18 +39,17 @@ pub fn run_tasks() {
if let Some(task) = fetch_task() {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
// access coming task TCB exclusively
let next_task_cx_ptr = task.inner.exclusive_session(|task_inner| {
task_inner.task_status = TaskStatus::Running;
&task_inner.task_cx as *const TaskContext
});
let mut task_inner = task.inner_exclusive_access();
let next_task_cx_ptr = &task_inner.task_cx as *const TaskContext;
task_inner.task_status = TaskStatus::Running;
drop(task_inner);
// release coming task TCB manually
processor.current = Some(task);
// release processor manually
drop(processor);
unsafe {
__switch(idle_task_cx_ptr, next_task_cx_ptr);
}
} else {
println!("no tasks available in run_tasks");
}
}
}
@ -64,13 +62,10 @@ pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.exclusive_access().current()
}
pub fn current_process() -> Arc<ProcessControlBlock> {
current_task().unwrap().process.upgrade().unwrap()
}
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
task.get_user_token()
let token = task.inner_exclusive_access().get_user_token();
token
}
pub fn current_trap_cx() -> &'static mut TrapContext {
@ -80,23 +75,10 @@ pub fn current_trap_cx() -> &'static mut TrapContext {
.get_trap_cx()
}
pub fn current_trap_cx_user_va() -> usize {
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.trap_cx_user_va()
}
pub fn current_kstack_top() -> usize {
current_task().unwrap().kstack.get_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr =
PROCESSOR.exclusive_session(|processor| processor.get_idle_task_cx_ptr());
let mut processor = PROCESSOR.exclusive_access();
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
drop(processor);
unsafe {
__switch(switched_task_cx_ptr, idle_task_cx_ptr);
}

@ -1,12 +1,41 @@
use bitflags::*;
pub const MAX_SIG: usize = 31;
bitflags! {
pub struct SignalFlags: u32 {
const SIGINT = 1 << 2;
const SIGILL = 1 << 4;
const SIGABRT = 1 << 6;
const SIGFPE = 1 << 8;
const SIGSEGV = 1 << 11;
const SIGDEF = 1; // Default signal handling
const SIGHUP = 1 << 1;
const SIGINT = 1 << 2;
const SIGQUIT = 1 << 3;
const SIGILL = 1 << 4;
const SIGTRAP = 1 << 5;
const SIGABRT = 1 << 6;
const SIGBUS = 1 << 7;
const SIGFPE = 1 << 8;
const SIGKILL = 1 << 9;
const SIGUSR1 = 1 << 10;
const SIGSEGV = 1 << 11;
const SIGUSR2 = 1 << 12;
const SIGPIPE = 1 << 13;
const SIGALRM = 1 << 14;
const SIGTERM = 1 << 15;
const SIGSTKFLT = 1 << 16;
const SIGCHLD = 1 << 17;
const SIGCONT = 1 << 18;
const SIGSTOP = 1 << 19;
const SIGTSTP = 1 << 20;
const SIGTTIN = 1 << 21;
const SIGTTOU = 1 << 22;
const SIGURG = 1 << 23;
const SIGXCPU = 1 << 24;
const SIGXFSZ = 1 << 25;
const SIGVTALRM = 1 << 26;
const SIGPROF = 1 << 27;
const SIGWINCH = 1 << 28;
const SIGIO = 1 << 29;
const SIGPWR = 1 << 30;
const SIGSYS = 1 << 31;
}
}
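Bit i corresponds to signal number i (the conventional Linux numbering, as the error messages below also spell out), which is why check_pending_signals can turn a raw signal number into a flag with SignalFlags::from_bits(1 << sig). A small illustrative check:
assert_eq!(SignalFlags::from_bits(1 << 9).unwrap(), SignalFlags::SIGKILL);
assert_eq!(SignalFlags::from_bits(1 << 19).unwrap(), SignalFlags::SIGSTOP);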
@ -20,6 +49,8 @@ impl SignalFlags {
Some((-6, "Aborted, SIGABRT=6"))
} else if self.contains(Self::SIGFPE) {
Some((-8, "Erroneous Arithmetic Operation, SIGFPE=8"))
} else if self.contains(Self::SIGKILL) {
Some((-9, "Killed, SIGKILL=9"))
} else if self.contains(Self::SIGSEGV) {
Some((-11, "Segmentation Fault, SIGSEGV=11"))
} else {

@ -1,74 +1,238 @@
use super::id::TaskUserRes;
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{
mm::PhysPageNum,
sync::{UPIntrFreeCell, UPIntrRefMut},
};
use super::{TaskContext, SignalActions};
use super::{pid_alloc, KernelStack, PidHandle, SignalFlags};
use crate::config::TRAP_CONTEXT;
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
use core::cell::RefMut;
pub struct TaskControlBlock {
// immutable
pub process: Weak<ProcessControlBlock>,
pub kstack: KernelStack,
pub pid: PidHandle,
pub kernel_stack: KernelStack,
// mutable
pub inner: UPIntrFreeCell<TaskControlBlockInner>,
}
impl TaskControlBlock {
pub fn inner_exclusive_access(&self) -> UPIntrRefMut<'_, TaskControlBlockInner> {
self.inner.exclusive_access()
}
pub fn get_user_token(&self) -> usize {
let process = self.process.upgrade().unwrap();
let inner = process.inner_exclusive_access();
inner.memory_set.token()
}
inner: UPSafeCell<TaskControlBlockInner>,
}
pub struct TaskControlBlockInner {
pub res: Option<TaskUserRes>,
pub trap_cx_ppn: PhysPageNum,
pub base_size: usize,
pub task_cx: TaskContext,
pub task_status: TaskStatus,
pub exit_code: Option<i32>,
pub memory_set: MemorySet,
pub parent: Option<Weak<TaskControlBlock>>,
pub children: Vec<Arc<TaskControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub signals: SignalFlags,
pub signal_mask: SignalFlags,
// the signal that is currently being handled
pub handling_sig: isize,
// Signal actions
pub signal_actions: SignalActions,
// if the task is killed
pub killed: bool,
// if the task is frozen by a signal
pub frozen: bool,
pub trap_ctx_backup: Option<TrapContext>
}
impl TaskControlBlockInner {
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
#[allow(unused)]
pub fn get_user_token(&self) -> usize {
self.memory_set.token()
}
fn get_status(&self) -> TaskStatus {
self.task_status
}
pub fn is_zombie(&self) -> bool {
self.get_status() == TaskStatus::Zombie
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
self.fd_table.len() - 1
}
}
}
impl TaskControlBlock {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
let kstack = kstack_alloc();
let kstack_top = kstack.get_top();
Self {
process: Arc::downgrade(&process),
kstack,
pub fn inner_exclusive_access(&self) -> RefMut<'_, TaskControlBlockInner> {
self.inner.exclusive_access()
}
pub fn new(elf_data: &[u8]) -> Self {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
let task_control_block = Self {
pid: pid_handle,
kernel_stack,
inner: unsafe {
UPIntrFreeCell::new(TaskControlBlockInner {
res: Some(res),
UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn,
task_cx: TaskContext::goto_trap_return(kstack_top),
base_size: user_sp,
task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready,
exit_code: None,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
signals: SignalFlags::empty(),
signal_mask: SignalFlags::empty(),
handling_sig: -1,
signal_actions: SignalActions::default(),
killed: false,
frozen: false,
trap_ctx_backup: None
})
},
};
// prepare TrapContext in user space
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
*trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.exclusive_access().token(),
kernel_stack_top,
trap_handler as usize,
);
task_control_block
}
pub fn exec(&self, elf_data: &[u8], args: Vec<String>) {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, mut user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// push arguments on user stack
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp;
let mut argv: Vec<_> = (0..=args.len())
.map(|arg| {
translated_refmut(
memory_set.token(),
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
*argv[args.len()] = 0;
for i in 0..args.len() {
user_sp -= args[i].len() + 1;
*argv[i] = user_sp;
let mut p = user_sp;
for c in args[i].as_bytes() {
*translated_refmut(memory_set.token(), p as *mut u8) = *c;
p += 1;
}
*translated_refmut(memory_set.token(), p as *mut u8) = 0;
}
// make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>();
// **** access current TCB exclusively
let mut inner = self.inner_exclusive_access();
// substitute memory_set
inner.memory_set = memory_set;
// update trap_cx ppn
inner.trap_cx_ppn = trap_cx_ppn;
// initialize trap_cx
let mut trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.exclusive_access().token(),
self.kernel_stack.get_top(),
trap_handler as usize,
);
trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base;
*inner.get_trap_cx() = trap_cx;
// **** release current PCB
}
pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
// ---- hold parent PCB lock
let mut parent_inner = self.inner_exclusive_access();
// copy user space(include trap context)
let memory_set = MemorySet::from_existed_user(&parent_inner.memory_set);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
// copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent_inner.fd_table.iter() {
if let Some(file) = fd {
new_fd_table.push(Some(file.clone()));
} else {
new_fd_table.push(None);
}
}
let task_control_block = Arc::new(TaskControlBlock {
pid: pid_handle,
kernel_stack,
inner: unsafe {
UPSafeCell::new(TaskControlBlockInner {
trap_cx_ppn,
base_size: parent_inner.base_size,
task_cx: TaskContext::goto_trap_return(kernel_stack_top),
task_status: TaskStatus::Ready,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
signals: SignalFlags::empty(),
// inherit the signal_mask and signal_action
signal_mask: parent_inner.signal_mask,
handling_sig: -1,
signal_actions: parent_inner.signal_actions.clone(),
killed: false,
frozen: false,
trap_ctx_backup: None
})
},
});
// add child
parent_inner.children.push(task_control_block.clone());
// modify kernel_sp in trap_cx
// **** access child PCB exclusively
let trap_cx = task_control_block.inner_exclusive_access().get_trap_cx();
trap_cx.kernel_sp = kernel_stack_top;
// return
task_control_block
// **** release child PCB
// ---- release parent PCB
}
pub fn getpid(&self) -> usize {
self.pid.0
}
}
@ -76,5 +240,5 @@ impl TaskControlBlock {
pub enum TaskStatus {
Ready,
Running,
Blocking,
Zombie,
}

@ -1,12 +1,5 @@
use core::cmp::Ordering;
use crate::config::CLOCK_FREQ;
use crate::sbi::set_timer;
use crate::sync::UPIntrFreeCell;
use crate::task::{add_task, TaskControlBlock};
use alloc::collections::BinaryHeap;
use alloc::sync::Arc;
use lazy_static::*;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
@ -23,51 +16,3 @@ pub fn get_time_ms() -> usize {
pub fn set_next_trigger() {
set_timer(get_time() + CLOCK_FREQ / TICKS_PER_SEC);
}
pub struct TimerCondVar {
pub expire_ms: usize,
pub task: Arc<TaskControlBlock>,
}
impl PartialEq for TimerCondVar {
fn eq(&self, other: &Self) -> bool {
self.expire_ms == other.expire_ms
}
}
impl Eq for TimerCondVar {}
impl PartialOrd for TimerCondVar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
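// compare by negated expire_ms so that the max-heap BinaryHeap pops the soonest-expiring timer first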
let a = -(self.expire_ms as isize);
let b = -(other.expire_ms as isize);
Some(a.cmp(&b))
}
}
impl Ord for TimerCondVar {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
lazy_static! {
static ref TIMERS: UPIntrFreeCell<BinaryHeap<TimerCondVar>> =
unsafe { UPIntrFreeCell::new(BinaryHeap::<TimerCondVar>::new()) };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
let mut timers = TIMERS.exclusive_access();
timers.push(TimerCondVar { expire_ms, task });
}
pub fn check_timer() {
let current_ms = get_time_ms();
let mut timers = TIMERS.exclusive_access();
while let Some(timer) = timers.peek() {
if timer.expire_ms <= current_ms {
add_task(Arc::clone(&timer.task));
timers.pop();
} else {
break;
}
}
}

@ -1,7 +1,7 @@
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, Clone, Copy)]
pub struct TrapContext {
pub x: [usize; 32],
pub sstatus: Sstatus,

@ -1,17 +1,17 @@
mod context;
use crate::config::TRAMPOLINE;
use crate::config::{TRAMPOLINE, TRAP_CONTEXT};
use crate::syscall::syscall;
use crate::task::{
check_signals_of_current, current_add_signal, current_trap_cx, current_trap_cx_user_va,
current_user_token, exit_current_and_run_next, suspend_current_and_run_next, SignalFlags,
check_signals_error_of_current, current_add_signal, current_trap_cx, current_user_token,
exit_current_and_run_next, suspend_current_and_run_next, SignalFlags, handle_signals,
};
use crate::timer::{check_timer, set_next_trigger};
use crate::timer::set_next_trigger;
use core::arch::{asm, global_asm};
use riscv::register::{
mtvec::TrapMode,
scause::{self, Exception, Interrupt, Trap},
sie, sscratch, sstatus, stval, stvec,
sie, stval, stvec,
};
global_asm!(include_str!("trap.S"));
@ -21,14 +21,8 @@ pub fn init() {
}
fn set_kernel_trap_entry() {
extern "C" {
fn __alltraps();
fn __alltraps_k();
}
let __alltraps_k_va = __alltraps_k as usize - __alltraps as usize + TRAMPOLINE;
unsafe {
stvec::write(__alltraps_k_va, TrapMode::Direct);
sscratch::write(trap_from_kernel as usize);
stvec::write(trap_from_kernel as usize, TrapMode::Direct);
}
}
@ -44,32 +38,16 @@ pub fn enable_timer_interrupt() {
}
}
fn enable_supervisor_interrupt() {
unsafe {
sstatus::set_sie();
}
}
fn disable_supervisor_interrupt() {
unsafe {
sstatus::clear_sie();
}
}
#[no_mangle]
pub fn trap_handler() -> ! {
set_kernel_trap_entry();
let scause = scause::read();
let stval = stval::read();
//println!("into {:?}", scause.cause());
match scause.cause() {
Trap::Exception(Exception::UserEnvCall) => {
// jump to next instruction anyway
let mut cx = current_trap_cx();
cx.sepc += 4;
enable_supervisor_interrupt();
// get system call return value
let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12]]);
// cx is changed during sys_exec, so we have to call it again
@ -97,12 +75,8 @@ pub fn trap_handler() -> ! {
}
Trap::Interrupt(Interrupt::SupervisorTimer) => {
set_next_trigger();
check_timer();
suspend_current_and_run_next();
}
Trap::Interrupt(Interrupt::SupervisorExternal) => {
crate::board::irq_handler();
}
_ => {
panic!(
"Unsupported trap {:?}, stval = {:#x}!",
@ -111,8 +85,11 @@ pub fn trap_handler() -> ! {
);
}
}
// check signals
if let Some((errno, msg)) = check_signals_of_current() {
// handle signals sent to the current task
handle_signals();
// check for error signals and exit if one is pending
if let Some((errno, msg)) = check_signals_error_of_current() {
println!("[kernel] {}", msg);
exit_current_and_run_next(errno);
}
@ -121,22 +98,20 @@ pub fn trap_handler() -> ! {
#[no_mangle]
pub fn trap_return() -> ! {
disable_supervisor_interrupt();
set_user_trap_entry();
let trap_cx_user_va = current_trap_cx_user_va();
let trap_cx_ptr = TRAP_CONTEXT;
let user_satp = current_user_token();
extern "C" {
fn __alltraps();
fn __restore();
}
let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE;
//println!("before return");
unsafe {
asm!(
"fence.i",
"jr {restore_va}",
restore_va = in(reg) restore_va,
in("a0") trap_cx_user_va,
in("a0") trap_cx_ptr,
in("a1") user_satp,
options(noreturn)
);
@ -144,26 +119,10 @@ pub fn trap_return() -> ! {
}
#[no_mangle]
pub fn trap_from_kernel(_trap_cx: &TrapContext) {
let scause = scause::read();
let stval = stval::read();
match scause.cause() {
Trap::Interrupt(Interrupt::SupervisorExternal) => {
crate::board::irq_handler();
}
Trap::Interrupt(Interrupt::SupervisorTimer) => {
set_next_trigger();
check_timer();
// do not schedule now
}
_ => {
panic!(
"Unsupported trap from kernel: {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
pub fn trap_from_kernel() -> ! {
use riscv::register::sepc;
println!("stval = {:#x}, sepc = {:#x}", stval::read(), sepc::read());
panic!("a trap {:?} from kernel!", scause::read().cause());
}
pub use context::TrapContext;

@ -8,8 +8,6 @@
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
__alltraps:
csrrw sp, sscratch, sp
@ -69,36 +67,3 @@ __restore:
# back to user stack
ld sp, 2*8(sp)
sret
.align 2
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret

@ -1,2 +0,0 @@
export PATH=$(rustc --print sysroot)/bin:$PATH
export RUST_SRC_PATH=$(rustc --print sysroot)/lib/rustlib/src/rust/library/

@ -8,15 +8,9 @@ BINS := $(patsubst $(APP_DIR)/%.rs, $(TARGET_DIR)/%.bin, $(APPS))
OBJDUMP := rust-objdump --arch-name=riscv64
OBJCOPY := rust-objcopy --binary-architecture=riscv64
CP := cp
TEST ?=
elf: $(APPS)
@cargo build --release
ifeq ($(TEST), 1)
@$(CP) $(TARGET_DIR)/usertests $(TARGET_DIR)/initproc
endif
binary: elf
$(foreach elf, $(ELFS), $(OBJCOPY) $(elf) --strip-all -O binary $(patsubst $(TARGET_DIR)/%, $(TARGET_DIR)/%.bin, $(elf));)

@ -9,14 +9,10 @@ use user_lib::{close, open, read, OpenFlags};
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
println!("argc = {}", argc);
for (i, arg) in argv.iter().enumerate() {
println!("argv[{}] = {}", i, arg);
}
assert!(argc == 2);
let fd = open(argv[1], OpenFlags::RDONLY);
if fd == -1 {
panic!("Error occurred when opening file");
panic!("Error occured when opening file");
}
let fd = fd as usize;
let mut buf = [0u8; 256];

@ -1,138 +0,0 @@
#![no_std]
#![no_main]
#![feature(core_intrinsics)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
extern crate core;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use user_lib::{exit, sleep, thread_create, waittid};
const N: usize = 2;
const THREAD_NUM: usize = 10;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FlagState {
Out,
Want,
In,
}
static mut TURN: usize = 0;
static mut FLAG: [FlagState; THREAD_NUM] = [FlagState::Out; THREAD_NUM];
static GUARD: AtomicUsize = AtomicUsize::new(0);
fn critical_test_enter() {
assert_eq!(GUARD.fetch_add(1, Ordering::SeqCst), 0);
}
fn critical_test_claim() {
assert_eq!(GUARD.load(Ordering::SeqCst), 1);
}
fn critical_test_exit() {
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
}
fn eisenberg_enter_critical(id: usize) {
/* announce that we want to enter */
loop {
println!("Thread[{}] try enter", id);
vstore!(&FLAG[id], FlagState::Want);
loop {
/* check if any with higher priority is `Want` or `In` */
let mut prior_thread: Option<usize> = None;
let turn = vload!(&TURN);
let ring_id = if id < turn { id + THREAD_NUM } else { id };
// FLAG.iter() may lead to errors here, so use an index-based loop instead
for i in turn..ring_id {
if vload!(&FLAG[i % THREAD_NUM]) != FlagState::Out {
prior_thread = Some(i % THREAD_NUM);
break;
}
}
if prior_thread.is_none() {
break;
}
println!(
"Thread[{}]: prior thread {} exist, sleep and retry",
id,
prior_thread.unwrap()
);
sleep(1);
}
/* now tentatively claim the resource */
vstore!(&FLAG[id], FlagState::In);
/* enforce the ordering of `claim` and `conflict check` */
memory_fence!();
/* check if another thread is also `In`, which implies a conflict */
let mut conflict = false;
for i in 0..THREAD_NUM {
if i != id && vload!(&FLAG[i]) == FlagState::In {
conflict = true;
}
}
if !conflict {
break;
}
println!("Thread[{}]: CONFLECT!", id);
/* no need to sleep */
}
/* claim the turn */
vstore!(&TURN, id);
println!("Thread[{}] enter", id);
}
fn eisenberg_exit_critical(id: usize) {
/* find the next thread that wants to enter and give the turn to it */
let mut next = id;
let ring_id = id + THREAD_NUM;
for i in (id + 1)..ring_id {
let idx = i % THREAD_NUM;
if vload!(&FLAG[idx]) == FlagState::Want {
next = idx;
break;
}
}
vstore!(&TURN, next);
/* All done */
vstore!(&FLAG[id], FlagState::Out);
println!("Thread[{}] exit, give turn to {}", id, next);
}
pub fn thread_fn(id: usize) -> ! {
println!("Thread[{}] init.", id);
for _ in 0..N {
eisenberg_enter_critical(id);
critical_test_enter();
for _ in 0..3 {
critical_test_claim();
sleep(2);
}
critical_test_exit();
eisenberg_exit_critical(id);
}
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
let mut v = Vec::new();
// TODO: really shuffle
assert_eq!(THREAD_NUM, 10);
let shuffle: [usize; 10] = [0, 7, 4, 6, 2, 9, 8, 1, 3, 5];
for i in 0..THREAD_NUM {
v.push(thread_create(thread_fn as usize, shuffle[i]));
}
for tid in v.iter() {
let exit_code = waittid(*tid as usize);
assert_eq!(exit_code, 0, "thread conflict happened!");
println!("thread#{} exited with code {}", tid, exit_code);
}
println!("main thread exited.");
0
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{exit, fork, getpid, sleep, wait, yield_};
use user_lib::{exit, fork, getpid, sleep, yield_};
const DEPTH: usize = 4;
@ -27,19 +27,11 @@ fn fork_tree(cur: &str) {
println!("pid{}: {}", getpid(), cur);
fork_child(cur, '0');
fork_child(cur, '1');
let mut exit_code: i32 = 0;
for _ in 0..2 {
wait(&mut exit_code);
}
}
#[no_mangle]
pub fn main() -> i32 {
fork_tree("");
let mut exit_code: i32 = 0;
for _ in 0..2 {
wait(&mut exit_code);
}
sleep(3000);
0
}

@ -24,7 +24,7 @@ pub fn main() -> i32 {
}
close(f);
let time_ms = (get_time() - start) as usize;
let speed_kbs = (size_mb << 20) / time_ms;
let speed_kbs = size_mb * 1000000 / time_ms;
println!(
"{}MiB written, time cost = {}ms, write speed = {}KiB/s",
size_mb, time_ms, speed_kbs

@ -1,56 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::{fmt::format, string::String, vec::Vec};
use user_lib::{close, get_time, gettid, open, write, OpenFlags};
use user_lib::{exit, thread_create, waittid};
fn worker(size_kib: usize) {
let mut buffer = [0u8; 1024]; // 1KiB
for (i, ch) in buffer.iter_mut().enumerate() {
*ch = i as u8;
}
let filename = format(format_args!("testf{}\0", gettid()));
let f = open(filename.as_str(), OpenFlags::CREATE | OpenFlags::WRONLY);
if f < 0 {
panic!("Open test file failed!");
}
let f = f as usize;
for _ in 0..size_kib {
write(f, &buffer);
}
close(f);
exit(0)
}
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
assert_eq!(argc, 2, "wrong argument");
let size_mb = 1usize;
let size_kb = size_mb << 10;
let workers = argv[1].parse::<usize>().expect("wrong argument");
assert!(workers >= 1 && size_kb % workers == 0, "wrong argument");
let start = get_time();
let mut v = Vec::new();
let size_mb = 1usize;
for _ in 0..workers {
v.push(thread_create(worker as usize, size_kb / workers));
}
for tid in v.iter() {
assert_eq!(0, waittid(*tid as usize));
}
let time_ms = (get_time() - start) as usize;
let speed_kbs = size_kb * 1000 / time_ms;
println!(
"{}MiB written by {} threads, time cost = {}ms, write speed = {}KiB/s",
size_mb, workers, time_ms, speed_kbs
);
0
}

@ -1,73 +0,0 @@
#![no_std]
#![no_main]
#![allow(clippy::println_empty_string)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::exit;
use user_lib::{semaphore_create, semaphore_down, semaphore_up};
use user_lib::{thread_create, waittid};
const SEM_MUTEX: usize = 0;
const SEM_EMPTY: usize = 1;
const SEM_EXISTED: usize = 2;
const BUFFER_SIZE: usize = 8;
static mut BUFFER: [usize; BUFFER_SIZE] = [0; BUFFER_SIZE];
static mut FRONT: usize = 0;
static mut TAIL: usize = 0;
const PRODUCER_COUNT: usize = 4;
const NUMBER_PER_PRODUCER: usize = 100;
unsafe fn producer(id: *const usize) -> ! {
let id = *id;
for _ in 0..NUMBER_PER_PRODUCER {
semaphore_down(SEM_EMPTY);
semaphore_down(SEM_MUTEX);
BUFFER[FRONT] = id;
FRONT = (FRONT + 1) % BUFFER_SIZE;
semaphore_up(SEM_MUTEX);
semaphore_up(SEM_EXISTED);
}
exit(0)
}
unsafe fn consumer() -> ! {
for _ in 0..PRODUCER_COUNT * NUMBER_PER_PRODUCER {
semaphore_down(SEM_EXISTED);
semaphore_down(SEM_MUTEX);
print!("{} ", BUFFER[TAIL]);
TAIL = (TAIL + 1) % BUFFER_SIZE;
semaphore_up(SEM_MUTEX);
semaphore_up(SEM_EMPTY);
}
println!("");
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
// create semaphores
assert_eq!(semaphore_create(1) as usize, SEM_MUTEX);
assert_eq!(semaphore_create(BUFFER_SIZE) as usize, SEM_EMPTY);
assert_eq!(semaphore_create(0) as usize, SEM_EXISTED);
// create threads
let ids: Vec<_> = (0..PRODUCER_COUNT).collect();
let mut threads = Vec::new();
for i in 0..PRODUCER_COUNT {
threads.push(thread_create(
producer as usize,
&ids.as_slice()[i] as *const _ as usize,
));
}
threads.push(thread_create(consumer as usize, 0));
// wait for all threads to complete
for thread in threads.iter() {
waittid(*thread as usize);
}
println!("mpsc_sem passed!");
0
}

@ -1,78 +0,0 @@
#![no_std]
#![no_main]
#![feature(core_intrinsics)]
#![feature(asm)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
extern crate core;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use user_lib::{exit, sleep, thread_create, waittid};
const N: usize = 3;
static mut TURN: usize = 0;
static mut FLAG: [bool; 2] = [false; 2];
static GUARD: AtomicUsize = AtomicUsize::new(0);
fn critical_test_enter() {
assert_eq!(GUARD.fetch_add(1, Ordering::SeqCst), 0);
}
fn critical_test_claim() {
assert_eq!(GUARD.load(Ordering::SeqCst), 1);
}
fn critical_test_exit() {
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
}
fn peterson_enter_critical(id: usize, peer_id: usize) {
println!("Thread[{}] try enter", id);
vstore!(&FLAG[id], true);
vstore!(&TURN, peer_id);
memory_fence!();
while vload!(&FLAG[peer_id]) && vload!(&TURN) == peer_id {
println!("Thread[{}] enter fail", id);
sleep(1);
println!("Thread[{}] retry enter", id);
}
println!("Thread[{}] enter", id);
}
fn peterson_exit_critical(id: usize) {
vstore!(&FLAG[id], false);
println!("Thread[{}] exit", id);
}
pub fn thread_fn(id: usize) -> ! {
println!("Thread[{}] init.", id);
let peer_id: usize = id ^ 1;
for _ in 0..N {
peterson_enter_critical(id, peer_id);
critical_test_enter();
for _ in 0..3 {
critical_test_claim();
sleep(2);
}
critical_test_exit();
peterson_exit_critical(id);
}
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
let mut v = Vec::new();
v.push(thread_create(thread_fn as usize, 0));
// v.push(thread_create(thread_fn as usize, 1));
for tid in v.iter() {
let exit_code = waittid(*tid as usize);
assert_eq!(exit_code, 0, "thread conflict happened!");
println!("thread#{} exited with code {}", tid, exit_code);
}
println!("main thread exited.");
0
}

@ -1,107 +0,0 @@
#![no_std]
#![no_main]
#![allow(clippy::println_empty_string)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, sleep};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
use user_lib::{thread_create, waittid};
const N: usize = 5;
const ROUND: usize = 4;
// A round: think -> wait for forks -> eat
const GRAPH_SCALE: usize = 100;
fn get_time_u() -> usize {
get_time() as usize
}
// Time unit: ms
const ARR: [[usize; ROUND * 2]; N] = [
[700, 800, 1000, 400, 500, 600, 200, 400],
[300, 600, 200, 700, 1000, 100, 300, 600],
[500, 200, 900, 200, 400, 600, 1200, 400],
[500, 1000, 600, 500, 800, 600, 200, 900],
[600, 100, 600, 600, 200, 500, 600, 200],
];
static mut THINK: [[usize; ROUND * 2]; N] = [[0; ROUND * 2]; N];
static mut EAT: [[usize; ROUND * 2]; N] = [[0; ROUND * 2]; N];
fn philosopher_dining_problem(id: *const usize) {
let id = unsafe { *id };
let left = id;
let right = if id == N - 1 { 0 } else { id + 1 };
let min = if left < right { left } else { right };
let max = left + right - min;
for round in 0..ROUND {
// thinking
unsafe {
THINK[id][2 * round] = get_time_u();
}
sleep(ARR[id][2 * round]);
unsafe {
THINK[id][2 * round + 1] = get_time_u();
}
// wait for forks
mutex_lock(min);
mutex_lock(max);
// eating
unsafe {
EAT[id][2 * round] = get_time_u();
}
sleep(ARR[id][2 * round + 1]);
unsafe {
EAT[id][2 * round + 1] = get_time_u();
}
mutex_unlock(max);
mutex_unlock(min);
}
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
let mut v = Vec::new();
let ids: Vec<_> = (0..N).collect();
let start = get_time_u();
for i in 0..N {
assert_eq!(mutex_blocking_create(), i as isize);
v.push(thread_create(
philosopher_dining_problem as usize,
&ids.as_slice()[i] as *const _ as usize,
));
}
for tid in v.iter() {
waittid(*tid as usize);
}
let time_cost = get_time_u() - start;
println!("time cost = {}", time_cost);
println!("'-' -> THINKING; 'x' -> EATING; ' ' -> WAITING ");
for id in (0..N).into_iter().chain(0..=0) {
print!("#{}:", id);
for j in 0..time_cost / GRAPH_SCALE {
let current_time = j * GRAPH_SCALE + start;
if (0..ROUND).any(|round| unsafe {
let start_thinking = THINK[id][2 * round];
let end_thinking = THINK[id][2 * round + 1];
start_thinking <= current_time && current_time <= end_thinking
}) {
print!("-");
} else if (0..ROUND).any(|round| unsafe {
let start_eating = EAT[id][2 * round];
let end_eating = EAT[id][2 * round + 1];
start_eating <= current_time && current_time <= end_eating
}) {
print!("x");
} else {
print!(" ");
};
}
println!("");
}
0
}

@ -1,42 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
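// Each thread performs a non-atomic read-modify-write on the shared counter A:
// the delay loop between read_volatile and write_volatile widens the race
// window, so increments from other threads can be lost and the final
// assert_eq! in main may fail.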
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
}
exit(t as i32)
}
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -1,56 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use crate::alloc::string::ToString;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
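// Same racy increment as the plain adder test, but the length of the delay loop
// inside the race window is taken from argv[1]; with no argument it falls back
// to THREAD_COUNT.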
unsafe fn f(count: usize) -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..count {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
}
exit(t as i32)
}
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
let count: usize;
if argc == 1 {
count = THREAD_COUNT;
} else if argc == 2 {
count = argv[1].to_string().parse::<usize>().unwrap();
} else {
println!(
"ERROR in argv, argc is {}, argv[0] {} , argv[1] {} , argv[2] {}",
argc, argv[0], argv[1], argv[2]
);
exit(-1);
}
let start = get_time();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, count) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -1,51 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicBool, Ordering};
use user_lib::{exit, get_time, thread_create, waittid, yield_};
static mut A: usize = 0;
static OCCUPIED: AtomicBool = AtomicBool::new(false);
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
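// A user-level spinlock built on an AtomicBool: compare_exchange(false, true)
// either claims the flag or fails, in which case the thread yields to the
// scheduler and retries. The flag is released with a plain store once the
// shared counter has been updated.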
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
while OCCUPIED
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
.is_err()
{
yield_();
}
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
OCCUPIED.store(false, Ordering::Relaxed);
}
exit(t as i32)
}
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -1,51 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid, yield_};
static mut A: usize = 0;
static mut OCCUPIED: bool = false;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
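// A deliberately broken "lock": OCCUPIED is an ordinary static mut bool, so
// there is a window between observing OCCUPIED == false and setting it to true
// in which another thread can be scheduled. Two threads may therefore enter the
// critical section at once and increments of A can still be lost.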
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
while OCCUPIED {
yield_();
}
OCCUPIED = true;
// enter critical section
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
// exit critical section
OCCUPIED = false;
}
exit(t as i32)
}
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -1,46 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
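// The same read-modify-write on A, now guarded by the kernel mutex created with
// mutex_blocking_create() (id 0): a contending thread is suspended instead of
// busy-waiting and is woken when the holder calls mutex_unlock.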
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
mutex_lock(0);
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
mutex_unlock(0);
}
exit(t as i32)
}
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
assert_eq!(mutex_blocking_create(), 0);
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -1,46 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
use user_lib::{mutex_create, mutex_lock, mutex_unlock};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
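// Same protected increment, but using mutex_create(), which maps to
// sys_mutex_create(false) -- presumably the non-blocking (spin-style) mutex in
// this kernel. Correctness is the same as the blocking variant; only the
// waiting strategy differs.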
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
mutex_lock(0);
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
mutex_unlock(0);
}
exit(t as i32)
}
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
assert_eq!(mutex_create(), 0);
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}
let mut time_cost = Vec::new();
for tid in v.iter() {
time_cost.push(waittid(*tid));
}
println!("time cost is {}ms", get_time() - start);
assert_eq!(unsafe { A }, PER_THREAD * THREAD_COUNT);
0
}

@ -8,7 +8,7 @@ use user_lib::{exec, fork, wait};
#[no_mangle]
pub fn main() -> i32 {
for i in 0..50 {
for i in 0..1000 {
if fork() == 0 {
exec("pipe_large_test\0", &[core::ptr::null::<u8>()]);
} else {

@ -0,0 +1,32 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
// use user_lib::{sigaction, sigprocmask, SignalAction, SignalFlags, fork, exit, wait, kill, getpid, sleep, sigreturn};
use user_lib::*;
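// Minimal signal round-trip: register func as the handler for SIGUSR1, send
// SIGUSR1 to ourselves with kill, and let sigreturn() inside the handler
// restore the trap context that was saved before the handler ran.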
fn func() {
println!("user_sig_test succsess");
sigreturn();
}
#[no_mangle]
pub fn main() -> i32 {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
println!("signal_simple: sigaction");
if sigaction(SIGUSR1, &new, &old) < 0 {
panic!("Sigaction failed!");
}
println!("signal_simple: kill");
if kill(getpid() as usize, SIGUSR1) < 0 {
println!("Kill failed!");
exit(1);
}
println!("signal_simple: Done");
0
}

@ -0,0 +1,44 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::{sigaction, sigprocmask, SignalAction, SignalFlags, fork, exit, waitpid, kill, getpid, sleep, sigreturn};
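// Cross-process variant: the child installs a handler for signal 10 (SIGUSR1)
// and sleeps; the parent sleeps 500 ms, delivers the signal (passed here as the
// bit mask 1 << 10), then waits for the child to exit.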
fn func() {
println!("user_sig_test succsess");
sigreturn();
}
#[no_mangle]
pub fn main() -> i32 {
let pid = fork();
if pid == 0 {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
println!("signal_simple2: child sigaction");
if sigaction(10, &new, &old) < 0 {
panic!("Sigaction failed!");
}
sleep(1000);
println!("signal_simple2: child done");
exit(0);
} else if pid >0 {
println!("signal_simple2: parent kill child");
sleep(500);
if kill(pid as usize, 1<<10) < 0 {
println!("Kill failed!");
exit(1);
}
println!("signal_simple2: parent wait child");
let mut exit_code = 0;
waitpid(pid as usize, &mut exit_code);
println!("signal_simple2: parent Done");
exit(0);
}
0
}

@ -0,0 +1,204 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
// use user_lib::{sigaction, sigprocmask, SignalAction, SignalFlags, fork, exit, wait, kill, getpid, sleep, sigreturn};
use user_lib::*;
fn func() {
println!("user_sig_test succsess");
sigreturn();
}
fn func2() {
loop {
print!("");
}
}
fn func3() {
println!("interrupt");
sigreturn();
}
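// Signal number 50 is outside the supported range, so sigaction is expected to
// reject it.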
fn user_sig_test_failsignum() {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
if sigaction(50, &new, &old) >= 0 {
panic!("Wrong sigaction but success!");
}
}
fn user_sig_test_kill() {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
if sigaction(SIGUSR1, &new, &old) < 0 {
panic!("Sigaction failed!");
}
if kill(getpid() as usize, SIGUSR1) < 0 {
println!("Kill failed!");
exit(1);
}
}
fn user_sig_test_multiprocsignals() {
let pid = fork();
if pid == 0 {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
if sigaction(SIGUSR1, &new, &old) < 0 {
panic!("Sigaction failed!");
}
} else {
if kill(pid as usize, SIGUSR1) < 0 {
println!("Kill failed!");
exit(1);
}
let mut exit_code = 0;
wait(&mut exit_code);
}
}
fn user_sig_test_restore() {
let mut new = SignalAction::default();
let old = SignalAction::default();
let old2 = SignalAction::default();
new.handler = func as usize;
if sigaction(SIGUSR1, &new, &old) < 0 {
panic!("Sigaction failed!");
}
if sigaction(SIGUSR1, &old, &old2) < 0 {
panic!("Sigaction failed!");
}
if old2.handler != new.handler {
println!("Restore failed!");
exit(-1);
}
}
fn kernel_sig_test_ignore() {
sigprocmask(SignalFlags::SIGSTOP.bits() as u32);
if kill(getpid() as usize, SignalFlags::SIGSTOP.bits()) < 0 {
println!("kill failed");
exit(-1);
}
}
fn kernel_sig_test_stop_cont() {
let pid = fork();
if pid == 0 {
kill(getpid() as usize, SignalFlags::SIGSTOP.bits());
sleep(1000);
exit(-1);
} else {
sleep(5000);
kill(pid as usize, SignalFlags::SIGCONT.bits());
let mut exit_code = 0;
wait(&mut exit_code);
}
}
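// SIGKILL (signal 9) must not be overridable: sigaction should fail both for a
// valid action and when either the action or old_action pointer is null.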
fn kernel_sig_test_failignorekill() {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func as usize;
if sigaction(9, &new, &old) >= 0 {
panic!("Should not set sigaction to kill!");
}
if sigaction(9, &new, 0 as *const SignalAction) >= 0 {
panic!("Should not set sigaction to kill!");
}
if sigaction(9, 0 as *const SignalAction, &old) >= 0 {
panic!("Should not set sigaction to kill!");
}
}
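// Combined test: the child handles SIGUSR1 with func2 (an endless print loop)
// and signal 14 (SIGALRM) with func3, then signals itself with SIGUSR1. The
// parent later sends 1 << 14, whose handler prints "interrupt", and finally
// terminates the child with SIGKILL.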
fn final_sig_test() {
let mut new = SignalAction::default();
let old = SignalAction::default();
new.handler = func2 as usize;
let mut new2 = SignalAction::default();
let old2 = SignalAction::default();
new2.handler = func3 as usize;
let pid = fork();
if pid == 0 {
if sigaction(SIGUSR1, &new, &old) < 0 {
panic!("Sigaction failed!");
}
if sigaction(14, &new2, &old2) < 0 {
panic!("Sigaction failed!");
}
if kill(getpid() as usize, SIGUSR1) < 0 {
println!("Kill failed!");
exit(-1);
}
} else {
sleep(1000);
if kill(pid as usize, 1 << 14) < 0 {
println!("Kill failed!");
exit(-1);
}
sleep(1000);
kill(pid as usize, SignalFlags::SIGKILL.bits());
}
}
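// Run a single test case in a forked child and report OK!/FAILED! based on its
// exit code.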
fn run(f: fn()) -> bool {
let pid = fork();
if pid == 0 {
f();
exit(0);
} else {
let mut exit_code: i32 = 0;
wait(&mut exit_code);
if exit_code != 0 {
println!("FAILED!");
} else {
println!("OK!");
}
exit_code == 0
}
}
#[no_mangle]
pub fn main() -> i32 {
let tests: [(fn(), &str); 8] = [
(user_sig_test_failsignum, "user_sig_test_failsignum"),
(user_sig_test_kill, "user_sig_test_kill"),
(user_sig_test_multiprocsignals, "user_sig_test_multiprocsignals"),
(user_sig_test_restore, "user_sig_test_restore"),
(kernel_sig_test_ignore, "kernel_sig_test_ignore"),
(kernel_sig_test_stop_cont, "kernel_sig_test_stop_cont"),
(kernel_sig_test_failignorekill, "kernel_sig_test_failignorekill"),
(final_sig_test, "final_sig_test")
];
let mut fail_num = 0;
for test in tests {
println!("Testing {}", test.1);
if !run(test.0) {
fail_num += 1;
}
}
if fail_num == 0 {
println!("ALL TESTS PASSED");
0
} else {
println!("SOME TESTS FAILED");
-1
}
}

@ -1,45 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec;
use user_lib::exit;
use user_lib::{semaphore_create, semaphore_down, semaphore_up};
use user_lib::{sleep, thread_create, waittid};
const SEM_SYNC: usize = 0;
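// SEM_SYNC is created with an initial count of 0, so it acts as a pure
// synchronization signal rather than a resource counter: second blocks in
// semaphore_down until first has done its work and called semaphore_up.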
unsafe fn first() -> ! {
sleep(10);
println!("First work and wakeup Second");
semaphore_up(SEM_SYNC);
exit(0)
}
unsafe fn second() -> ! {
println!("Second want to continue,but need to wait first");
semaphore_down(SEM_SYNC);
println!("Second can work now");
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
// create semaphores
assert_eq!(semaphore_create(0) as usize, SEM_SYNC);
// create threads
let threads = vec![
thread_create(first as usize, 0),
thread_create(second as usize, 0),
];
// wait for all threads to complete
for thread in threads.iter() {
waittid(*thread as usize);
}
println!("sync_sem passed!");
0
}

@ -1,59 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec;
use user_lib::exit;
use user_lib::{
condvar_create, condvar_signal, condvar_wait, mutex_blocking_create, mutex_lock, mutex_unlock,
};
use user_lib::{sleep, thread_create, waittid};
static mut A: usize = 0;
const CONDVAR_ID: usize = 0;
const MUTEX_ID: usize = 0;
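// Classic condition-variable pattern: second re-checks the predicate (A == 1)
// in a while loop under the mutex and calls condvar_wait, which releases the
// mutex while blocking and re-acquires it before returning; first updates A
// under the same mutex and signals the condvar to wake it.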
unsafe fn first() -> ! {
sleep(10);
println!("First work, Change A --> 1 and wakeup Second");
mutex_lock(MUTEX_ID);
A = 1;
condvar_signal(CONDVAR_ID);
mutex_unlock(MUTEX_ID);
exit(0)
}
unsafe fn second() -> ! {
println!("Second want to continue,but need to wait A=1");
mutex_lock(MUTEX_ID);
while A == 0 {
println!("Second: A is {}", A);
condvar_wait(CONDVAR_ID, MUTEX_ID);
}
mutex_unlock(MUTEX_ID);
println!("A is {}, Second can work now", A);
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
// create condvar & mutex
assert_eq!(condvar_create() as usize, CONDVAR_ID);
assert_eq!(mutex_blocking_create() as usize, MUTEX_ID);
// create threads
let threads = vec![
thread_create(first as usize, 0),
thread_create(second as usize, 0),
];
// wait for all threads to complete
for thread in threads.iter() {
waittid(*thread as usize);
}
println!("test_condvar passed!");
0
}

@ -1,45 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec;
use user_lib::{exit, thread_create, waittid};
pub fn thread_a() -> ! {
for _ in 0..1000 {
print!("a");
}
exit(1)
}
pub fn thread_b() -> ! {
for _ in 0..1000 {
print!("b");
}
exit(2)
}
pub fn thread_c() -> ! {
for _ in 0..1000 {
print!("c");
}
exit(3)
}
#[no_mangle]
pub fn main() -> i32 {
let v = vec![
thread_create(thread_a as usize, 0),
thread_create(thread_b as usize, 0),
thread_create(thread_c as usize, 0),
];
for tid in v.iter() {
let exit_code = waittid(*tid as usize);
println!("thread#{} exited with code {}", tid, exit_code);
}
println!("main thread exited.");
0
}

@ -1,44 +0,0 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, thread_create, waittid};
struct Argument {
pub ch: char,
pub rc: i32,
}
fn thread_print(arg: *const Argument) -> ! {
let arg = unsafe { &*arg };
for _ in 0..1000 {
print!("{}", arg.ch);
}
exit(arg.rc)
}
#[no_mangle]
pub fn main() -> i32 {
let mut v = Vec::new();
let args = [
Argument { ch: 'a', rc: 1 },
Argument { ch: 'b', rc: 2 },
Argument { ch: 'c', rc: 3 },
];
for arg in args.iter() {
v.push(thread_create(
thread_print as usize,
arg as *const _ as usize,
));
}
for tid in v.iter() {
let exit_code = waittid(*tid as usize);
println!("thread#{} exited with code {}", tid, exit_code);
}
println!("main thread exited.");
0
}

@ -4,138 +4,40 @@
#[macro_use]
extern crate user_lib;
// Not included in SUCC_TESTS & FAIL_TESTS:
// count_lines, infloop, user_shell, usertests
// Each entry: app_name (argv[0]), argv[1], argv[2], argv[3], expected exit code
static SUCC_TESTS: &[(&str, &str, &str, &str, i32)] = &[
("filetest_simple\0", "\0", "\0", "\0", 0),
("cat\0", "filea\0", "\0", "\0", 0),
("cmdline_args\0", "1\0", "2\0", "3\0", 0),
("eisenberg\0", "\0", "\0", "\0", 0),
("exit\0", "\0", "\0", "\0", 0),
("fantastic_text\0", "\0", "\0", "\0", 0),
("forktest_simple\0", "\0", "\0", "\0", 0),
("forktest\0", "\0", "\0", "\0", 0),
("forktest2\0", "\0", "\0", "\0", 0),
("forktree\0", "\0", "\0", "\0", 0),
("hello_world\0", "\0", "\0", "\0", 0),
("huge_write\0", "\0", "\0", "\0", 0),
("matrix\0", "\0", "\0", "\0", 0),
("mpsc_sem\0", "\0", "\0", "\0", 0),
("peterson\0", "\0", "\0", "\0", 0),
("phil_din_mutex\0", "\0", "\0", "\0", 0),
("pipe_large_test\0", "\0", "\0", "\0", 0),
("pipetest\0", "\0", "\0", "\0", 0),
("race_adder_arg\0", "3\0", "\0", "\0", 0),
("race_adder_atomic\0", "\0", "\0", "\0", 0),
("race_adder_mutex_blocking\0", "\0", "\0", "\0", 0),
("race_adder_mutex_spin\0", "\0", "\0", "\0", 0),
("run_pipe_test\0", "\0", "\0", "\0", 0),
("sleep_simple\0", "\0", "\0", "\0", 0),
("sleep\0", "\0", "\0", "\0", 0),
("sleep_simple\0", "\0", "\0", "\0", 0),
("sync_sem\0", "\0", "\0", "\0", 0),
("test_condvar\0", "\0", "\0", "\0", 0),
("threads_arg\0", "\0", "\0", "\0", 0),
("threads\0", "\0", "\0", "\0", 0),
("yield\0", "\0", "\0", "\0", 0),
];
static FAIL_TESTS: &[(&str, &str, &str, &str, i32)] = &[
("stack_overflow\0", "\0", "\0", "\0", -11),
("race_adder_loop\0", "\0", "\0", "\0", -6),
("priv_csr\0", "\0", "\0", "\0", -4),
("priv_inst\0", "\0", "\0", "\0", -4),
("store_fault\0", "\0", "\0", "\0", -11),
("until_timeout\0", "\0", "\0", "\0", -6),
("race_adder\0", "\0", "\0", "\0", -6),
("huge_write_mt\0", "\0", "\0", "\0", -6),
static TESTS: &[&str] = &[
"exit\0",
"fantastic_text\0",
"forktest\0",
"forktest2\0",
"forktest_simple\0",
"hello_world\0",
"matrix\0",
"sleep\0",
"sleep_simple\0",
"stack_overflow\0",
"yield\0",
];
use user_lib::{exec, fork, waitpid};
fn run_tests(tests: &[(&str, &str, &str, &str, i32)]) -> i32 {
let mut pass_num = 0;
let mut arr: [*const u8; 4] = [
core::ptr::null::<u8>(),
core::ptr::null::<u8>(),
core::ptr::null::<u8>(),
core::ptr::null::<u8>(),
];
for test in tests {
println!("Usertests: Running {}", test.0);
arr[0] = test.0.as_ptr();
if test.1 != "\0" {
arr[1] = test.1.as_ptr();
arr[2] = core::ptr::null::<u8>();
arr[3] = core::ptr::null::<u8>();
if test.2 != "\0" {
arr[2] = test.2.as_ptr();
arr[3] = core::ptr::null::<u8>();
if test.3 != "\0" {
arr[3] = test.3.as_ptr();
} else {
arr[3] = core::ptr::null::<u8>();
}
} else {
arr[2] = core::ptr::null::<u8>();
arr[3] = core::ptr::null::<u8>();
}
} else {
arr[1] = core::ptr::null::<u8>();
arr[2] = core::ptr::null::<u8>();
arr[3] = core::ptr::null::<u8>();
}
#[no_mangle]
pub fn main() -> i32 {
for test in TESTS {
println!("Usertests: Running {}", test);
let pid = fork();
if pid == 0 {
exec(test.0, &arr[..]);
exec(*test, &[core::ptr::null::<u8>()]);
panic!("unreachable!");
} else {
let mut exit_code: i32 = Default::default();
let wait_pid = waitpid(pid as usize, &mut exit_code);
assert_eq!(pid, wait_pid);
if exit_code == test.4 {
// the app exited with its expected code, so count it as passed
pass_num += 1;
}
println!(
"\x1b[32mUsertests: Test {} in Process {} exited with code {}\x1b[0m",
test.0, pid, exit_code
test, pid, exit_code
);
}
}
pass_num
}
#[no_mangle]
pub fn main() -> i32 {
let succ_num = run_tests(SUCC_TESTS);
let err_num = run_tests(FAIL_TESTS);
if succ_num == SUCC_TESTS.len() as i32 && err_num == FAIL_TESTS.len() as i32 {
println!(
"{} of sueecssed apps, {} of failed apps run correctly. \nUsertests passed!",
SUCC_TESTS.len(),
FAIL_TESTS.len()
);
return 0;
}
if succ_num != SUCC_TESTS.len() as i32 {
println!(
"all successed app_num is {} , but only passed {}",
SUCC_TESTS.len(),
succ_num
);
}
if err_num != FAIL_TESTS.len() as i32 {
println!(
"all failed app_num is {} , but only passed {}",
FAIL_TESTS.len(),
err_num
);
}
println!(" Usertests failed!");
return -1;
println!("Usertests passed!");
0
}

@ -2,7 +2,6 @@
#![feature(linkage)]
#![feature(panic_info_message)]
#![feature(alloc_error_handler)]
#![feature(core_intrinsics)]
#[macro_use]
pub mod console;
@ -133,89 +132,112 @@ pub fn waitpid_nb(pid: usize, exit_code: &mut i32) -> isize {
sys_waitpid(pid as isize, exit_code as *mut _)
}
bitflags! {
pub struct SignalFlags: i32 {
const SIGINT = 1 << 2;
const SIGILL = 1 << 4;
const SIGABRT = 1 << 6;
const SIGFPE = 1 << 8;
const SIGSEGV = 1 << 11;
pub fn sleep(period_ms: usize) {
let start = sys_get_time();
while sys_get_time() < start + period_ms as isize {
sys_yield();
}
}
pub fn kill(pid: usize, signal: i32) -> isize {
sys_kill(pid, signal)
/// Action for a signal
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct SignalAction {
pub handler: usize,
pub mask: SignalFlags
}
pub fn sleep(sleep_ms: usize) {
sys_sleep(sleep_ms);
}
pub fn thread_create(entry: usize, arg: usize) -> isize {
sys_thread_create(entry, arg)
}
pub fn gettid() -> isize {
sys_gettid()
}
pub fn waittid(tid: usize) -> isize {
loop {
match sys_waittid(tid) {
-2 => {
yield_();
}
exit_code => return exit_code,
impl Default for SignalAction {
fn default() -> Self {
Self {
handler: 0,
mask: SignalFlags::empty()
}
}
}
pub fn mutex_create() -> isize {
sys_mutex_create(false)
}
pub fn mutex_blocking_create() -> isize {
sys_mutex_create(true)
}
pub fn mutex_lock(mutex_id: usize) {
sys_mutex_lock(mutex_id);
}
pub fn mutex_unlock(mutex_id: usize) {
sys_mutex_unlock(mutex_id);
}
pub fn semaphore_create(res_count: usize) -> isize {
sys_semaphore_create(res_count)
}
pub fn semaphore_up(sem_id: usize) {
sys_semaphore_up(sem_id);
}
pub fn semaphore_down(sem_id: usize) {
sys_semaphore_down(sem_id);
}
pub fn condvar_create() -> isize {
sys_condvar_create(0)
}
pub fn condvar_signal(condvar_id: usize) {
sys_condvar_signal(condvar_id);
pub const SIGDEF: i32 = 1; // Default signal handling
pub const SIGHUP: i32 = 1;
pub const SIGINT: i32 = 2;
pub const SIGQUIT: i32 = 3;
pub const SIGILL: i32 = 4;
pub const SIGTRAP: i32 = 5;
pub const SIGABRT: i32 = 6;
pub const SIGBUS: i32 = 7;
pub const SIGFPE: i32 = 8;
pub const SIGKILL: i32 = 9;
pub const SIGUSR1: i32 = 10;
pub const SIGSEGV: i32 = 11;
pub const SIGUSR2: i32 = 12;
pub const SIGPIPE: i32 = 13;
pub const SIGALRM: i32 = 14;
pub const SIGTERM: i32 = 15;
pub const SIGSTKFLT: i32 = 16;
pub const SIGCHLD: i32 = 17;
pub const SIGCONT: i32 = 18;
pub const SIGSTOP: i32 = 19;
pub const SIGTSTP: i32 = 20;
pub const SIGTTIN: i32 = 21;
pub const SIGTTOU: i32 = 22;
pub const SIGURG: i32 = 23;
pub const SIGXCPU: i32 = 24;
pub const SIGXFSZ: i32 = 25;
pub const SIGVTALRM: i32 = 26;
pub const SIGPROF: i32 = 27;
pub const SIGWINCH: i32 = 28;
pub const SIGIO: i32 = 29;
pub const SIGPWR: i32 = 30;
pub const SIGSYS: i32 = 31;
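// The bitflags below encode signal number n as bit n (1 << n), so masks for
// sigprocmask can be composed by OR-ing flags together.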
bitflags! {
pub struct SignalFlags: i32 {
const SIGDEF = 1; // Default signal handling
const SIGHUP = 1 << 1;
const SIGINT = 1 << 2;
const SIGQUIT = 1 << 3;
const SIGILL = 1 << 4;
const SIGTRAP = 1 << 5;
const SIGABRT = 1 << 6;
const SIGBUS = 1 << 7;
const SIGFPE = 1 << 8;
const SIGKILL = 1 << 9;
const SIGUSR1 = 1 << 10;
const SIGSEGV = 1 << 11;
const SIGUSR2 = 1 << 12;
const SIGPIPE = 1 << 13;
const SIGALRM = 1 << 14;
const SIGTERM = 1 << 15;
const SIGSTKFLT = 1 << 16;
const SIGCHLD = 1 << 17;
const SIGCONT = 1 << 18;
const SIGSTOP = 1 << 19;
const SIGTSTP = 1 << 20;
const SIGTTIN = 1 << 21;
const SIGTTOU = 1 << 22;
const SIGURG = 1 << 23;
const SIGXCPU = 1 << 24;
const SIGXFSZ = 1 << 25;
const SIGVTALRM = 1 << 26;
const SIGPROF = 1 << 27;
const SIGWINCH = 1 << 28;
const SIGIO = 1 << 29;
const SIGPWR = 1 << 30;
const SIGSYS = 1 << 31;
}
}
pub fn condvar_wait(condvar_id: usize, mutex_id: usize) {
sys_condvar_wait(condvar_id, mutex_id);
pub fn kill(pid: usize, signal: i32) -> isize {
sys_kill(pid, signal)
}
#[macro_export]
macro_rules! vstore {
($var_ref: expr, $value: expr) => {
unsafe { core::intrinsics::volatile_store($var_ref as *const _ as _, $value) }
};
pub fn sigaction(signum: i32, action: *const SignalAction, old_action: *const SignalAction) -> isize {
sys_sigaction(signum, action, old_action)
}
#[macro_export]
macro_rules! vload {
($var_ref: expr) => {
unsafe { core::intrinsics::volatile_load($var_ref as *const _ as _) }
};
pub fn sigprocmask(mask: u32) -> isize {
sys_sigprocmask(mask)
}
#[macro_export]
macro_rules! memory_fence {
() => {
core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst)
};
pub fn sigreturn() -> isize {
sys_sigreturn()
}

@ -1,3 +1,7 @@
use core::arch::asm;
use crate::SignalAction;
const SYSCALL_DUP: usize = 24;
const SYSCALL_OPEN: usize = 56;
const SYSCALL_CLOSE: usize = 57;
@ -5,31 +9,21 @@ const SYSCALL_PIPE: usize = 59;
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
const SYSCALL_SLEEP: usize = 101;
const SYSCALL_YIELD: usize = 124;
const SYSCALL_KILL: usize = 129;
const SYSCALL_SIGACTION: usize = 134;
const SYSCALL_SIGPROCMASK: usize = 135;
const SYSCALL_SIGRETURN: usize = 139;
const SYSCALL_GET_TIME: usize = 169;
const SYSCALL_GETPID: usize = 172;
const SYSCALL_FORK: usize = 220;
const SYSCALL_EXEC: usize = 221;
const SYSCALL_WAITPID: usize = 260;
const SYSCALL_THREAD_CREATE: usize = 1000;
const SYSCALL_GETTID: usize = 1001;
const SYSCALL_WAITTID: usize = 1002;
const SYSCALL_MUTEX_CREATE: usize = 1010;
const SYSCALL_MUTEX_LOCK: usize = 1011;
const SYSCALL_MUTEX_UNLOCK: usize = 1012;
const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
const SYSCALL_SEMAPHORE_UP: usize = 1021;
const SYSCALL_SEMAPHORE_DOWN: usize = 1022;
const SYSCALL_CONDVAR_CREATE: usize = 1030;
const SYSCALL_CONDVAR_SIGNAL: usize = 1031;
const SYSCALL_CONDVAR_WAIT: usize = 1032;
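// Common syscall trampoline: the id and up to three arguments are handed to the
// kernel via `ecall` (following the usual RISC-V convention of a7 for the id
// and a0-a2 for the arguments), and the kernel's return value comes back in
// x10, bound to `ret` below.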
fn syscall(id: usize, args: [usize; 3]) -> isize {
let mut ret: isize;
unsafe {
core::arch::asm!(
asm!(
"ecall",
inlateout("x10") args[0] => ret,
in("x11") args[1],
@ -72,10 +66,6 @@ pub fn sys_exit(exit_code: i32) -> ! {
panic!("sys_exit never returns!");
}
pub fn sys_sleep(sleep_ms: usize) -> isize {
syscall(SYSCALL_SLEEP, [sleep_ms, 0, 0])
}
pub fn sys_yield() -> isize {
syscall(SYSCALL_YIELD, [0, 0, 0])
}
@ -107,50 +97,14 @@ pub fn sys_waitpid(pid: isize, exit_code: *mut i32) -> isize {
syscall(SYSCALL_WAITPID, [pid as usize, exit_code as usize, 0])
}
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
syscall(SYSCALL_THREAD_CREATE, [entry, arg, 0])
}
pub fn sys_gettid() -> isize {
syscall(SYSCALL_GETTID, [0; 3])
}
pub fn sys_waittid(tid: usize) -> isize {
syscall(SYSCALL_WAITTID, [tid, 0, 0])
}
pub fn sys_mutex_create(blocking: bool) -> isize {
syscall(SYSCALL_MUTEX_CREATE, [blocking as usize, 0, 0])
}
pub fn sys_mutex_lock(id: usize) -> isize {
syscall(SYSCALL_MUTEX_LOCK, [id, 0, 0])
}
pub fn sys_mutex_unlock(id: usize) -> isize {
syscall(SYSCALL_MUTEX_UNLOCK, [id, 0, 0])
}
pub fn sys_semaphore_create(res_count: usize) -> isize {
syscall(SYSCALL_SEMAPHORE_CREATE, [res_count, 0, 0])
}
pub fn sys_semaphore_up(sem_id: usize) -> isize {
syscall(SYSCALL_SEMAPHORE_UP, [sem_id, 0, 0])
}
pub fn sys_semaphore_down(sem_id: usize) -> isize {
syscall(SYSCALL_SEMAPHORE_DOWN, [sem_id, 0, 0])
}
pub fn sys_condvar_create(_arg: usize) -> isize {
syscall(SYSCALL_CONDVAR_CREATE, [_arg, 0, 0])
pub fn sys_sigaction(signum: i32, action: *const SignalAction, old_action: *const SignalAction) -> isize {
syscall(SYSCALL_SIGACTION, [signum as usize, action as usize, old_action as usize])
}
pub fn sys_condvar_signal(condvar_id: usize) -> isize {
syscall(SYSCALL_CONDVAR_SIGNAL, [condvar_id, 0, 0])
pub fn sys_sigprocmask(mask: u32) -> isize {
syscall(SYSCALL_SIGPROCMASK, [mask as usize, 0, 0])
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
syscall(SYSCALL_CONDVAR_WAIT, [condvar_id, mutex_id, 0])
pub fn sys_sigreturn() -> isize {
syscall(SYSCALL_SIGRETURN, [0, 0, 0])
}
