Compare commits


233 Commits
ch6 ... main

Author SHA1 Message Date
Yu Chen 21bae8b034 add cargo fmt in Makefile, and exec make fmt
3 years ago
Yu Chen a773a977e7 udpate README
3 years ago
chyyuu d52256baa9
Merge pull request #78 from CL8192/forktree-fix
3 years ago
cl8192 7c83dd49e7 wait child exit in forktree
3 years ago
Yu Chen 9a95c7dcb4 update usr apps: usertests.rs cat.rs. now all apps in usertests can run correctly
3 years ago
chyyuu cc10cd10eb
Merge pull request #77 from CL8192/dev
3 years ago
yuoo655 b732da242d initiatively exit child in forktree
3 years ago
Yu Chen ca997784ab add comments for qemu-exit
3 years ago
Yu Chen a85cb98743 add VIRT_TEST support for qemu exit with exit_code, for CI autotest
3 years ago
Yu Chen 31a2873a61 update rustsbi-qemu 'commit id:c1761f2' :: setup PMP addr[base:0x0010_0000, len: 0x0000_2000] for VIRT_TEST/RTC
3 years ago
Yu Chen 1371aab236 add more apps for test
3 years ago
chyyuu e615ae4483
Merge pull request #45 from DeathWish5/main
3 years ago
chyyuu 18d4a89ab7
Merge branch 'main' into main
3 years ago
Yu Chen c234d1417b fix bug in sbi_shutdown
3 years ago
Yu Chen cf00c3ae5d update github CI for autotest
3 years ago
chyyuu 87cd106b67
Merge pull request #75 from CL8192/main
3 years ago
chyyuu 3d5e9ed9e6
Merge branch 'main' into main
3 years ago
chyyuu 2bd092dd64
Merge pull request #74 from YdrMaster/main
3 years ago
yuoo655 07675fcd00 add ci tests update usertests
3 years ago
YdrMaster c9a288c110 ci: cache qemu
3 years ago
Yu Chen 8e73480c99 cargo fmt
3 years ago
Yu Chen 8657b656e5 rust-toolchain nightly-2020-04-11
3 years ago
DeathWish5 1afa429e93 feat: CI run tests
3 years ago
Yifan Wu 964445e59c Fix #71.
3 years ago
Yifan Wu df36cbe657
Merge pull request #69 from wei-huan/main
3 years ago
RunOS 9d2690f8d9 virtaddr -> usize high 256GB addrspace bug fix
3 years ago
chyyuu a8e4c13e99
Merge pull request #65 from cuishuang/main
3 years ago
cuishuang a70a983497 fix some typos
3 years ago
Yifan Wu ca1d7a07b8 Bump Rust to nightly-2022-04-11 && support debugging in release mode
3 years ago
Yu Chen 3b7c4b1cdc update README for debug info of OS
3 years ago
Yu Chen 91c29d0b70 update CI for build-doc
3 years ago
Yu Chen a61d34e2b1 update CI for build api doc
3 years ago
Yu Chen 858334bc02 update CI for build api doc
3 years ago
Yu Chen b6978bf6ec update index.html
3 years ago
Yu Chen aec0a6ebe5 Merge branch 'main' of github.com:rcore-os/rCore-Tutorial-v3 into main
3 years ago
Yu Chen 34e1a3def8 fix typo in CI for build api doc
3 years ago
Yu Chen d6a3de2510 update CI for build api doc
3 years ago
Yu Chen 3a44decf58 add index.html
3 years ago
Yu Chen 923638023d add CI for build-doc
3 years ago
Yu Chen c009012d85 Merge branch 'ch9' into main
3 years ago
Yifan Wu 21d57c1396 use exclusive_session to eliminate some explicit drops.
3 years ago
Yifan Wu 334d868a5c We should disable sie before trapping back to user.
3 years ago
Yifan Wu fbe8e39b38 Still a lot of bugs :(
3 years ago
Yifan Wu 53034d7c33 Add ns16550a
3 years ago
Yifan Wu d3bd19867c Use latest virtio-drivers && add huge_write_mt but it cannot work now
3 years ago
Yifan Wu 6ef566faac IRQ-based VirtIOBlk Access. Plz wait for the virtio-drivers crate to be updated.
3 years ago
Yifan Wu f0f7f6fcaa Update rustsbi-qemu
3 years ago
Yifan Wu 89b9d7c161 Merge recent updates from ch8
3 years ago
Yifan Wu e6b6251979 Update README.md
3 years ago
Yifan Wu 61152471b7 Add boards/ && clippy
3 years ago
Yifan Wu be2ed8fa37 Update README.md
3 years ago
Yifan Wu 6f09af2c0f Support signal mechanism for ch8(only works on signal-thread apps)
3 years ago
Yifan Wu 48bdefe6b8 Update README.md
3 years ago
Yifan Wu 29d6d26644 Fix cat && add count_lines
3 years ago
Yifan Wu 1c55663478 Small Fix && cargo fmt
3 years ago
Yifan Wu 740730e7f7 Merge recent update from ch7 && cargo clippy
3 years ago
Yifan Wu f65451bc72 Update README.md
3 years ago
Yifan Wu e8686526bb Ref asm&global_asm from core::arch.
3 years ago
Yifan Wu 514c110a28 Bump Rust to nightly-2022-01-19
3 years ago
Yifan Wu 40f37501af Maximum concurrent processes from 40/35->30.
3 years ago
Yifan Wu 24e530935f Update .gitignore
3 years ago
Yifan Wu c358424fae Update README.md
4 years ago
Yifan Wu 2ac621972c Merge recent updates from ch7.
4 years ago
Yifan Wu aa104ecd54 Update README.md.
4 years ago
Yifan Wu d81560a492 Update README.md.
4 years ago
Yifan Wu cba8d9e6d8 Now PageTable::unmap calls PageTable::find_pte instead of PageTable::find_pte_create.
4 years ago
Yifan Wu 72d6b38fca Now PageTable::unmap calls PageTable::find_pte instead of PageTable::find_pte_create.
4 years ago
Yifan Wu cf6d905ac4 Kernel cannot dump now.
4 years ago
Yifan Wu abd9d361e4 Bump to rust nightly-2022-01-01, feature global_asm,asm->stable
4 years ago
DeathWish5 045c47e4ef user: add critical test for software-synchronous tests
4 years ago
DeathWish5 97fdd4f2a2 Merge branch 'main' of https://github.com/rcore-os/rCore-Tutorial-v3 into main
4 years ago
DeathWish5 22db3123d8 user: add peterson algorithm and Eisenberg & McGuire algorithm
4 years ago
Yifan Wu f6b210adbe
Update README.md
4 years ago
Yifan Wu 760615095e
Update README.md
4 years ago
Yifan Wu 06243fac76 Bump to Rust nightly 2021-12-15
4 years ago
Yu Chen 713e78ea91 add condvar in kernel and app
4 years ago
Yu Chen 1c0bbf4404 add user app: sync_sem.rs
4 years ago
Yu Chen 90796450fe fix typo of sys_semaphore_create
4 years ago
Yifan Wu 4ad64e83f6 MutexBlocking works correctly.
4 years ago
Yu Chen 3dc6d9c97c add user app: race_adder with arg
4 years ago
Yifan Wu b77b108a28 Update os/Makefile, rm ... -f -> rm -f ...
4 years ago
Yu Chen 1baf177f9e update .gitignore README.md dev-env-info.md
4 years ago
Yu Chen a3f9b5fea9 update .gitignore
4 years ago
Yu Chen 11a389592a update README.md, dev-env-info.md
4 years ago
Yu Chen 7caf43bbbf add setenv.sh
4 years ago
Yu Chen bb5dca2158 update README.md, dev-env-info.md
4 years ago
Yu Chen 3d2909e990 Merge branch 'ch8' into main
4 years ago
Yifan Wu 2041a7c0d4 Now construction of PA/VA only uses 56/39 bits.
4 years ago
Yifan Wu cd6754a7df rust->nightly-2021-10-15,cargo-binutils->0.3.3
4 years ago
Yifan Wu 5753a09366 Implement mpsc using semaphores.
4 years ago
Yifan Wu aedd7f5612 Add a solution of Philosopher dining problem using Mutex with an illustration.
4 years ago
Yifan Wu b851f8d743 Create threads with a argument. See bin/threads_arg.rs
4 years ago
Yifan Wu 8adfc90db9 Implement sleep using blocking & BinaryHeap.
4 years ago
Yifan Wu 7225254d8a Add MutexBlocking.
4 years ago
Yifan Wu b0fad5aca3 Add MutexSpin and several syscalls.
4 years ago
Yifan Wu 43c6b7cf01 Add race_adder_{atomic,loop}.
4 years ago
Yifan Wu 136e26ae6c User base from 0x0->0x10000; user image size limit from 128MB->16MB; trigger race condition on k210
4 years ago
Yifan Wu 24b3c82b8a Stage2: multiple user threads based on uniprocessor, see new added test race_adder and threads.
4 years ago
Yifan Wu 1493ec9459 Stage1 clear! All applications work but now they are based on threads.
4 years ago
Yifan Wu c599a31dd0 Debugging sys_exec :(
4 years ago
Yifan Wu 6693de9611 Update rustc to newest
4 years ago
Yifan Wu 26bbec6320 Working on ch8
4 years ago
Yifan Wu 91a758d657 Rustc->nightly2021-01-30
4 years ago
Yifan Wu 03ea339e58
Update README.md
4 years ago
Yifan Wu 638eb8666a Update rustc && rustsbi-k210
4 years ago
Yifan Wu bf69560f9b Update rustsbi; huge_write writes 1MiB
4 years ago
Chen 940073e9f3
Merge pull request #26 from felixonmars/patch-1
4 years ago
Felix Yan 03151ac124
Correct typos in drivers/block/sdcard.rs
4 years ago
Yu Chen 46900cc9af update to rustc 1.56.0-nightly (08095fc1f 2021-07-26)
4 years ago
Yu Chen e68f261ed6 update to rustc 1.56.0-nightly (08095fc1f 2021-07-26)
4 years ago
Yifan Wu cf9218113f Update 2021-07-21
4 years ago
Yifan Wu 315e61da1a Rm spin::Mutex except for easy-fs & add new test huge_write & flush cache to disk after a write transaction
4 years ago
Yifan Wu 1590923666 Update README.md
4 years ago
Yifan Wu aaed6006aa Update progress 2021-07-19
4 years ago
Yifan Wu 95431917ed Update README.md
4 years ago
Yifan Wu b5f7fb6c45 Update README.md
4 years ago
Yifan Wu f04110e6e3 Update README.md
4 years ago
Yifan Wu a7b981b14d
Update README.md
4 years ago
Yifan Wu 3a6e4e38f2
Update README.md
4 years ago
Yifan Wu 81559f70b9 Merge branch 'dev' into main
4 years ago
Yifan Wu 0a8bd2c3fd Fixed a bug that the efs lock was not be held correctly
4 years ago
Yifan Wu 01098eb113 Fixed a bug that the efs lock was not be held correctly
4 years ago
Yifan Wu e7f120bab3 Downgrade cargo-binutils to 0.2.0
4 years ago
Yifan Wu 7a36cdb77f Bump rustsbi to 0.2.0-alpha.1[81d53d8]
4 years ago
Yifan Wu f7ed29756c Link small sections in linker
4 years ago
Yifan Wu bfa6a80732 Downgrade cargo-binutils to 0.2.0
4 years ago
Yifan Wu ff69985d79 Downgrade cargo-binutils to 0.2.0
4 years ago
Yifan Wu a57d470edc Bump rustsbi to 0.2.0-alpha.1[81d53d8]
4 years ago
Yifan Wu 2e76499676 Bump rustsbi to 0.2.0-alpha.1[81d53d8]
4 years ago
Yifan Wu 91d4d6d40c Link small sections in linker
4 years ago
Yifan Wu a09429b32c Link small sections in linker
4 years ago
Yifan Wu 60477da9be
Merge pull request #9 from Spxg/main
4 years ago
Spxg a1cda4aa59 panic_handler: update msg format and add column location
4 years ago
Yifan Wu d302a0d616 Merge branch 'dev' into main
4 years ago
Yifan Wu 3554e20dc6 Do not fetch tools when running on qemu.
4 years ago
Yifan Wu d57a160b32 Do not fetch tools when running on qemu.
4 years ago
Yifan Wu 818363f2ca Fix qemu mmio range
4 years ago
Yifan Wu 195816ce2c Fix qemu mmio range
4 years ago
Yifan Wu 613f77c5a4 Remove unused code.
4 years ago
Yifan Wu 230e4442d0 Remove unused code.
4 years ago
Yifan Wu 6298f57a87 Remove DirentBytes
4 years ago
Yifan Wu a589179adc Remove DirentBytes
4 years ago
Yifan Wu 1e2e83e886 Remove unused code.
4 years ago
Yifan Wu 3e47a0dbee Remove unused code.
4 years ago
Yifan Wu a43dbc4e34 Fix k210 alignment issue when push cmdargs when sys_exec
4 years ago
Yifan Wu dd2be93ef0 Fix k210 alignment issue when push cmdargs when sys_exec
4 years ago
Yifan Wu e643af7937 Fix k210 alignment issue when push cmdargs when sys_exec
4 years ago
Yifan Wu e55c5200c5 Merge dev: ch7 updates
4 years ago
Yifan Wu 67372ac84d Merge updates from ch7
4 years ago
Yifan Wu cfa3819bee Add Ubuntu18.04 docker
4 years ago
Yifan Wu 33395156f9 Add Ubuntu18.04 docker
4 years ago
Yifan Wu 3e1c12b6a1 Add sys_dup && support input/output redirection in user_shell
4 years ago
Yifan Wu a7346c96b4 DiskInode sz->128bytes && user heap -> 32KiB
4 years ago
Yifan Wu 8a1c96d963 Add tool: cat
4 years ago
Yifan Wu 01280fc833 Support indirect2 in easy-fs::layout::DiskInode
4 years ago
Yifan Wu 90d351bfe8 Support cmdline_args when sys_exec.
4 years ago
Yifan Wu 4668911483
Merge pull request #6 from ZhangHanDong/main
4 years ago
Yifan Wu 9196963e44
Update Makefile
4 years ago
blackanger 3f5308f46c modify Makefile for Docker
4 years ago
blackanger e0a3933b1c add Dockerfile
4 years ago
Yifan Wu 35cc3d6e2f Fix overflow bug when ceiling va
4 years ago
Yifan Wu b8f1db4aa3 Fix overflow bug when ceiling va
4 years ago
Yifan Wu 1346fb1a1f Refactor easy-fs
4 years ago
Yifan Wu 12c6c53af5 Close all pipes in pipetest.
4 years ago
Yifan Wu e8a0682cf8 Refactor easy-fs
4 years ago
Yifan Wu 40508d68ab Clean easy-fs-fuse
4 years ago
Yifan Wu 920d077a66 Refactor easy-fs.
4 years ago
Yifan Wu 8b27976d23
Merge pull request #5 from Spxg/main
4 years ago
Spxg 84a55c17c4 VirtAddr: fix add with overflow when debug mode
4 years ago
Yifan Wu c6a262d215 Close all pipes in pipetest.
4 years ago
Yifan Wu 69933e2985 Close all pipes in pipetest.
4 years ago
Yifan Wu 00084f5165 Merge branch 'dev' into main
4 years ago
Yifan Wu 87743bac4d Merge branch 'ch7' into dev
4 years ago
Yifan Wu 04114ad949 Remove Any Trait of File
4 years ago
Yifan Wu a5c4f3228e Merge branch 'dev' into main
4 years ago
Yifan Wu 1b6f2c4c1e Fix lock uses in ch5
4 years ago
Yifan Wu 9b65abcfa8 Fix lock uses in ch5
4 years ago
Yifan Wu caac1beb0a Mutex -> RefCell in Processor.
4 years ago
Yifan Wu fd75ac027a Mutex -> RefCell in Processor.
4 years ago
Yifan Wu d97b0a20ab Replace TCB.inner.block with TCB::acquire_inner_lock
4 years ago
Yifan Wu e04394af56 Replace TCB.inner.block with TCB::acquire_inner_lock
4 years ago
Yifan Wu ea4222bed0 Merge branch 'dev' into main
4 years ago
Yifan Wu b28d94b226 Fix other usertests: xstate -> exit_code
4 years ago
Yifan Wu b6626d534b Fix other usertests: xstate -> exit_code
4 years ago
Yifan Wu 0d9024b5bd Merge branch 'dev' into main
4 years ago
Yifan Wu b659f10d22 Fix user_shell
4 years ago
Yifan Wu b8240fac5a Fix user_shell
4 years ago
Yifan Wu 29d02e7442 Small Fix.
4 years ago
Yifan Wu 4b6bd7deaa Merge branch 'dev' into main
4 years ago
Yifan Wu 1cc75ded25 Move kflash.py out of proj.
4 years ago
Yifan Wu dddd04b683 Move kflash.py out of proj.
4 years ago
Yifan Wu c01b3289c6 Merge branch 'dev' into main
4 years ago
Yifan Wu eb5ef8e956 Bump rustsbi to 0.1.1 && make config of qemu/k210 different
4 years ago
Yifan Wu 33373aa20d Bump rustsbi to 0.1.1 && make config of qemu/k210 different
4 years ago
Yifan Wu 5afed009c0 Merge branch 'dev' into main
4 years ago
Yifan Wu 07467287da Fix exit_code in user
4 years ago
Yifan Wu a6c7b52283 Fix exit_code in user
4 years ago
Yifan Wu 654f6eb959 Update os/Makefile && Update rust to 2021-01-30
4 years ago
Yifan Wu 18da8a3879 Update os/Makefile && Update rust to 2021-01-30
4 years ago
Yifan Wu 4f7db8b92e Update os/Makefile && Update rust to 2021-01-30
4 years ago
Yifan Wu 6267c5d922 Do not clone KERNEL_SPACE in mm::init
4 years ago
Yifan Wu 9d7882a73d Do not clone KERNEL_SPACE in mm::init
4 years ago
Yifan Wu 9772373743 Fix os/Makefile: Support macOS
5 years ago
Yifan Wu e588d40d70 Fix os/Makefile: Support macOS
5 years ago
Yifan Wu c14392ec60
Merge pull request #3 from cyyself/main
5 years ago
Yangyu Chen c763a3be96 add env check, write sdcard check and change dd bs to fit macOS
5 years ago
Yu Chen c65ce846ce rust-toochain --> nightly
5 years ago
Yu Chen e9453d7834 rust-toochain --> nightly
5 years ago
Yu Chen 3b8b42ec4c rust-toochain --> nightly
5 years ago
Yifan Wu 05d34106ce Update wyfcyx/rustsbi fb6af33f.
5 years ago
Yifan Wu 1a7261d86d Move some variable name to task_cx to task_cx_ptr2(ch5 ver).
5 years ago
Yifan Wu dbe56c1362 Move some variable name to task_cx to task_cx_ptr2(ch5 ver).
5 years ago
Yifan Wu 12747d71b4 Remove meaningless sstatus::set_sie() when initializing.
5 years ago
Yifan Wu fd638d5388 Remove meaningless sstatus::set_sie() when initializing.
5 years ago
Yifan Wu 208b827b5c Add env.
5 years ago
Yifan Wu b674fd5a77 Remove fs.img.
5 years ago
Yifan Wu fdab87d2ed Update Rust environment initialization.
5 years ago
Yifan Wu 7c0d66c58f Remove some warnings.
5 years ago
Yifan Wu 606abbe6a1 Simple filetest passed on qemu/k210.
5 years ago
Yifan Wu 1bafe9615f Fix virtio_phys_to_virt. Now we can load app from disk on qemu/k210!
5 years ago
Yifan Wu ae9eecf97b Load app from sdcard on K210, but panicked on qemu.
5 years ago
Yifan Wu ea515323d3 Import easy-fs in os && change easy-fs to no_std mode.
5 years ago
Yifan Wu ecccea65a0 Change single file limit from 70KiB to 94KiB & pack apps and list them.
5 years ago
Yifan Wu d13380603b Large/small file I/O test passed.
5 years ago
Yifan Wu dbaa7c8c6c Write Hello, world! to a file and read it!
5 years ago
Yifan Wu 5ea07840ce Eliminate unuseful block reads/writes.
5 years ago
Yifan Wu c351635e2f Create two files and list them.
5 years ago
Yifan Wu 459efec595 We need BlockCache.
5 years ago
Yifan Wu af02e68d19 create & open efs.
5 years ago
Yifan Wu 84900e8b94 Remove block device test.
5 years ago
Yifan Wu 63bccc4e8f Now sleep test sleeps 5secs.
5 years ago
Yifan Wu 914f042617 Add sdcard driver based on k210-rust crates && adjust clock freq.
5 years ago
Yifan Wu f754326d0a virtio-blk worked.
5 years ago

@@ -0,0 +1,66 @@
name: Build Rust Doc And Run tests

on: [push]

env:
  CARGO_TERM_COLOR: always

jobs:
  build-doc:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: nightly-2022-04-11
          components: rust-src, llvm-tools-preview
          target: riscv64gc-unknown-none-elf
      - name: Build doc
        run: cd os && cargo doc --no-deps --verbose
      - name: Deploy to Github Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./os/target/riscv64gc-unknown-none-elf/doc
          destination_dir: ${{ github.ref_name }}
  run-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: nightly-2022-04-11
          components: rust-src, llvm-tools-preview
          target: riscv64gc-unknown-none-elf
      - uses: actions-rs/install@v0.1
        with:
          crate: cargo-binutils
          version: latest
          use-tool-cache: true
      - name: Cache QEMU
        uses: actions/cache@v3
        with:
          path: qemu-7.0.0
          key: qemu-7.0.0-x86_64-riscv64
      - name: Install QEMU
        run: |
          sudo apt-get update
          sudo apt-get install ninja-build -y
          if [ ! -d qemu-7.0.0 ]; then
            wget https://download.qemu.org/qemu-7.0.0.tar.xz
            tar -xf qemu-7.0.0.tar.xz
            cd qemu-7.0.0
            ./configure --target-list=riscv64-softmmu
            make -j
          else
            cd qemu-7.0.0
          fi
          sudo make install
          qemu-system-riscv64 --version
      - name: Run usertests
        run: cd os && make run TEST=1
        timeout-minutes: 10

19
.gitignore vendored

@@ -1,8 +1,13 @@
.idea/*
os/target/*
os/.idea/*
.*/*
!.github/*
!.vscode/settings.json
**/target/
**/Cargo.lock
os/src/link_app.S
os/Cargo.lock
user/target/*
user/.idea/*
user/Cargo.lock
os/src/linker.ld
os/last-*
os/.gdb_history
tools/
pushall.sh

@@ -0,0 +1,10 @@
{
// Prevent "can't find crate for `test`" error on no_std
// Ref: https://github.com/rust-lang/vscode-rust/issues/729
// For vscode-rust plugin users:
"rust.target": "riscv64gc-unknown-none-elf",
"rust.all_targets": false,
// For Rust Analyzer plugin users:
"rust-analyzer.cargo.target": "riscv64gc-unknown-none-elf",
"rust-analyzer.checkOnSave.allTargets": false
}

@@ -0,0 +1,40 @@
FROM ubuntu:18.04
LABEL maintainer="dinghao188" \
version="1.1" \
description="ubuntu 18.04 with tools for tsinghua's rCore-Tutorial-V3"
#install some deps
RUN set -x \
&& apt-get update \
&& apt-get install -y curl wget autoconf automake autotools-dev curl libmpc-dev libmpfr-dev libgmp-dev \
gawk build-essential bison flex texinfo gperf libtool patchutils bc xz-utils \
zlib1g-dev libexpat-dev pkg-config libglib2.0-dev libpixman-1-dev git tmux python3
#install rust and qemu
RUN set -x; \
RUSTUP='/root/rustup.sh' \
&& cd $HOME \
#install rust
&& curl https://sh.rustup.rs -sSf > $RUSTUP && chmod +x $RUSTUP \
&& $RUSTUP -y --default-toolchain nightly --profile minimal \
#compile qemu
&& wget https://ftp.osuosl.org/pub/blfs/conglomeration/qemu/qemu-5.0.0.tar.xz \
&& tar xvJf qemu-5.0.0.tar.xz \
&& cd qemu-5.0.0 \
&& ./configure --target-list=riscv64-softmmu,riscv64-linux-user \
&& make -j$(nproc) install \
&& cd $HOME && rm -rf qemu-5.0.0 qemu-5.0.0.tar.xz
#for chinese network
RUN set -x; \
APT_CONF='/etc/apt/sources.list'; \
CARGO_CONF='/root/.cargo/config'; \
BASHRC='/root/.bashrc' \
&& echo 'export RUSTUP_DIST_SERVER=https://mirrors.ustc.edu.cn/rust-static' >> $BASHRC \
&& echo 'export RUSTUP_UPDATE_ROOT=https://mirrors.ustc.edu.cn/rust-static/rustup' >> $BASHRC \
&& touch $CARGO_CONF \
&& echo '[source.crates-io]' > $CARGO_CONF \
&& echo "replace-with = 'ustc'" >> $CARGO_CONF \
&& echo '[source.ustc]' >> $CARGO_CONF \
&& echo 'registry = "git://mirrors.ustc.edu.cn/crates.io-index"' >> $CARGO_CONF

@@ -0,0 +1,10 @@
DOCKER_NAME ?= dinghao188/rcore-tutorial
.PHONY: docker build_docker

docker:
	docker run --rm -it --mount type=bind,source=$(shell pwd),destination=/mnt ${DOCKER_NAME}

build_docker:
	docker build -t ${DOCKER_NAME} .

fmt:
	cd easy-fs; cargo fmt; cd ../easy-fs-fuse; cargo fmt; cd ../os; cargo fmt; cd ../user; cargo fmt; cd ..

@@ -1,2 +1,300 @@
# rCore-Tutorial-v3
rCore-Tutorial version 3.
rCore-Tutorial version 3.5. See the [Documentation in Chinese](https://rcore-os.github.io/rCore-Tutorial-Book-v3/).
rCore-Tutorial API Docs. See the [API Docs of Ten OSes](#OS-API-DOCS)
If you don't know the Rust language and want to learn it, please visit [Rust Learning Resources](https://github.com/rcore-os/rCore/wiki/study-resource-of-system-programming-in-RUST)
Official QQ group number: 735045051
## news
- 25/01/2022: Version 3.6.0 is on the way! Now we directly update the code on the chX branches, so please check periodically for updates.
## Overview
This project aims to show how to write a **Unix-like OS** running on **RISC-V** platforms **from scratch** in **[Rust](https://www.rust-lang.org/)** for **beginners** without any background knowledge of **computer architectures, assembly languages or operating systems**.
## Features
* Platform supported: `qemu-system-riscv64` simulator or dev boards based on [Kendryte K210 SoC](https://canaan.io/product/kendryteai) such as [Maix Dock](https://www.seeedstudio.com/Sipeed-MAIX-Dock-p-4815.html)
* OS
* concurrency of multiple processes, each of which contains multiple native threads
* preemptive scheduling (Round-Robin algorithm)
* dynamic memory management in kernel
* virtual memory
* a simple file system with a block cache
* an interactive shell in the userspace
* **only 4K+ LoC**
* [A detailed documentation in Chinese](https://rcore-os.github.io/rCore-Tutorial-Book-v3/) despite the lack of comments in the code (an English version is not available at present)
## Prerequisites
### Install Rust
See [official guide](https://www.rust-lang.org/tools/install).
Install some tools:
```sh
$ rustup target add riscv64gc-unknown-none-elf
$ cargo install cargo-binutils --vers =0.3.3
$ rustup component add llvm-tools-preview
$ rustup component add rust-src
```
### Install Qemu
Here we manually compile and install Qemu 5.0.0. For example, on Ubuntu 18.04:
```sh
# install dependency packages
$ sudo apt install autoconf automake autotools-dev curl libmpc-dev libmpfr-dev libgmp-dev \
gawk build-essential bison flex texinfo gperf libtool patchutils bc \
zlib1g-dev libexpat-dev pkg-config libglib2.0-dev libpixman-1-dev git tmux python3 python3-pip
# download Qemu source code
$ wget https://download.qemu.org/qemu-5.0.0.tar.xz
# extract to qemu-5.0.0/
$ tar xvJf qemu-5.0.0.tar.xz
$ cd qemu-5.0.0
# build
$ ./configure --target-list=riscv64-softmmu,riscv64-linux-user
$ make -j$(nproc)
```
Then, add the following contents to `~/.bashrc` (please adjust these paths according to your environment):
```
export PATH=$PATH:/home/shinbokuow/Downloads/built/qemu-5.0.0
export PATH=$PATH:/home/shinbokuow/Downloads/built/qemu-5.0.0/riscv64-softmmu
export PATH=$PATH:/home/shinbokuow/Downloads/built/qemu-5.0.0/riscv64-linux-user
```
Finally, update the current shell:
```sh
$ source ~/.bashrc
```
Now we can check the version of Qemu:
```sh
$ qemu-system-riscv64 --version
QEMU emulator version 5.0.0
Copyright (c) 2003-2020 Fabrice Bellard and the QEMU Project developers
```
### Install RISC-V GNU Embedded Toolchain (including GDB)
Download the compressed file for your platform from the [SiFive website](https://www.sifive.com/software) (Ctrl+F 'toolchain').
Extract it and append the location of the `bin` directory under its root directory to `$PATH`.
For example, we can check the version of GDB:
```sh
$ riscv64-unknown-elf-gdb --version
GNU gdb (SiFive GDB-Metal 10.1.0-2020.12.7) 10.1
Copyright (C) 2020 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
```
### Install serial tools (optional, only needed if you want to run on K210)
```sh
$ pip3 install pyserial
$ sudo apt install python3-serial
```
## Run our project
### Qemu
```sh
$ git clone https://github.com/rcore-os/rCore-Tutorial-v3.git
$ cd rCore-Tutorial-v3/os
$ make run
```
After outputting some debug messages, the kernel lists all the available applications and enters the user shell:
```
/**** APPS ****
mpsc_sem
usertests
pipetest
forktest2
cat
initproc
race_adder_loop
threads_arg
race_adder_mutex_spin
race_adder_mutex_blocking
forktree
user_shell
huge_write
race_adder
race_adder_atomic
threads
stack_overflow
filetest_simple
forktest_simple
cmdline_args
run_pipe_test
forktest
matrix
exit
fantastic_text
sleep_simple
yield
hello_world
pipe_large_test
sleep
phil_din_mutex
**************/
Rust user shell
>>
```
You can run any application except `initproc` and `user_shell` itself. To run an application, just type its filename and hit Enter. `usertests` runs a bunch of applications, so it is recommended.
Type `Ctrl+a` then `x` to exit Qemu.
### K210
Before chapter 6, you do not need an SD card:
```sh
$ git clone https://github.com/rcore-os/rCore-Tutorial-v3.git
$ cd rCore-Tutorial-v3/os
$ make run BOARD=k210
```
From chapter 6 onwards, before running the kernel, we should insert an SD card into the PC and manually write the filesystem image to it:
```sh
$ cd rCore-Tutorial-v3/os
$ make sdcard
```
By default this overwrites the device `/dev/sdb`, which is assumed to be the SD card, but you can provide another location, e.g. `make sdcard SDCARD=/dev/sdc`.
After that, remove the SD card from the PC and insert it into the slot of the K210. Connect the K210 to the PC and then:
```sh
$ git clone https://github.com/rcore-os/rCore-Tutorial-v3.git
$ cd rCore-Tutorial-v3/os
$ make run BOARD=k210
```
Type `Ctrl+]` to disconnect from K210.
## Show runtime debug info of the OS kernel
The ch9-log branch contains a lot of debug info. You can try running rCore Tutorial on it
to understand the internal behavior of the OS kernel.
```sh
$ git clone https://github.com/rcore-os/rCore-Tutorial-v3.git
$ cd rCore-Tutorial-v3/os
$ git checkout ch9-log
$ make run
......
[rustsbi] RustSBI version 0.2.0-alpha.10, adapting to RISC-V SBI v0.3
.______ __ __ _______.___________. _______..______ __
| _ \ | | | | / | | / || _ \ | |
| |_) | | | | | | (----`---| |----`| (----`| |_) || |
| / | | | | \ \ | | \ \ | _ < | |
| |\ \----.| `--' |.----) | | | .----) | | |_) || |
| _| `._____| \______/ |_______/ |__| |_______/ |______/ |__|
[rustsbi] Implementation: RustSBI-QEMU Version 0.0.2
[rustsbi-dtb] Hart count: cluster0 with 1 cores
[rustsbi] misa: RV64ACDFIMSU
[rustsbi] mideleg: ssoft, stimer, sext (0x222)
[rustsbi] medeleg: ima, ia, bkpt, la, sa, uecall, ipage, lpage, spage (0xb1ab)
[rustsbi] pmp0: 0x10000000 ..= 0x10001fff (rw-)
[rustsbi] pmp1: 0x2000000 ..= 0x200ffff (rw-)
[rustsbi] pmp2: 0xc000000 ..= 0xc3fffff (rw-)
[rustsbi] pmp3: 0x80000000 ..= 0x8fffffff (rwx)
[rustsbi] enter supervisor 0x80200000
[KERN] rust_main() begin
[KERN] clear_bss() begin
[KERN] clear_bss() end
[KERN] mm::init() begin
[KERN] mm::init_heap() begin
[KERN] mm::init_heap() end
[KERN] mm::init_frame_allocator() begin
[KERN] mm::frame_allocator::lazy_static!FRAME_ALLOCATOR begin
......
```
## Rustdoc
Currently it mainly helps you navigate the code, since only a tiny part of the code has been documented.
You can open the HTML docs of `os` by running `cargo doc --no-deps --open` in the `os` directory.
### OS-API-DOCS
The API docs for the ten OSes:
1. [Lib-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch1/os/index.html)
1. [Batch-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch2/os/index.html)
1. [MultiProg-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch3-coop/os/index.html)
1. [TimeSharing-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch3/os/index.html)
1. [AddrSpace-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch4/os/index.html)
1. [Process-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch5/os/index.html)
1. [FileSystem-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch6/os/index.html)
1. [IPC-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch7/os/index.html)
1. [SyncMutex-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch8/os/index.html)
1. [IODevice-OS API doc](https://learningos.github.io/rCore-Tutorial-v3/ch9/os/index.html)
## Working in progress
Our first release 3.5.0 (chapters 1-7) has been published.
There will be 9 chapters in our next release 3.6.0, where 2 new chapters will be added:
* chapter 8: synchronization on a uniprocessor
* chapter 9: I/O devices
Current version is 3.6.0-alpha.1 and we are still working on it.
Here are the updates since 3.5.0:
### Completed
* [x] automatically clean up and rebuild before running our project on a different platform
* [x] fix `power` series application in early chapters, now you can find modulus in the output
* [x] use `UPSafeCell` instead of `RefCell` or `spin::Mutex` in order to access static data structures, and adjust its API so that it cannot be borrowed twice at a time (mention `& .exclusive_access().task[0]` in `run_first_task`)
* [x] move `TaskContext` into `TaskControlBlock` instead of restoring it in place on the kernel stack (since ch3), eliminating the annoying `task_cx_ptr2`
* [x] replace `llvm_asm!` with `asm!`
* [x] expand the fs image size generated by `rcore-fs-fuse` to 128MiB
* [x] add a new test named `huge_write` which evaluates the fs performance (qemu \~500KiB/s, k210 \~50KiB/s)
* [x] flush all block caches to disk after an fs transaction which involves write operations
* [x] replace `spin::Mutex` with `UPSafeCell` before SMP chapter
* [x] add code for a new chapter about synchronization & mutual exclusion (uniprocessor only)
* [x] bug fix: we should call `find_pte` rather than `find_pte_create` in `PageTable::unmap`
* [x] clarify: "check validity of level-3 pte in `find_pte` instead of checking it outside this function" should not be a bug
* [x] code of chapter 8: synchronization on a uniprocessor
* [x] switch the code of chapter 6 and chapter 7
* [x] support signal mechanism in chapter 7/8 (only works for apps with a single thread)
* [x] add boards/ directory and support rustdoc; for example, you can use `cargo doc --no-deps --open` to view the documentation of a crate
### Todo(High priority)
* [ ] review documentation, current progress: 5/9
* [ ] support user-level sync primitives in chapter 8
* [ ] code of chapter 9: device drivers based on interrupts, including UART and block devices
* [ ] use old fs image optionally, do not always rebuild the image
* [ ] add new system calls: getdents64/fstat
* [ ] shell functionality improvement (to be continued...)
* [ ] give every non-zero process exit code a unique and clear error type
* [ ] effective error handling of mm module
### Todo(Low priority)
* [ ] rewrite the practice doc and remove some improper questions
* [ ] provide a smooth debugging experience at the Rust source code level
* [ ] format the code using official tools
### Crates
We will add them later.

Binary file not shown.

Binary file not shown.

@@ -0,0 +1,18 @@
# rCore-Tutorial-v3
rCore-Tutorial version 3.x
## Dependency
### Binaries
* rustc: 1.57.0-nightly (e1e9319d9 2021-10-14)
* cargo-binutils: 0.3.3
* qemu: 5.0.0
* rustsbi-lib: 0.2.0-alpha.4
* rustsbi-qemu: d4968dd2
* rustsbi-k210: b689314e

@@ -0,0 +1,12 @@
[package]
name = "easy-fs-fuse"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
clap = "2.33.3"
easy-fs = { path = "../easy-fs" }
rand = "0.8.0"

@@ -0,0 +1,153 @@
use clap::{App, Arg};
use easy_fs::{BlockDevice, EasyFileSystem};
use std::fs::{read_dir, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Arc;
use std::sync::Mutex;
const BLOCK_SZ: usize = 512;
struct BlockFile(Mutex<File>);
impl BlockDevice for BlockFile {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
let mut file = self.0.lock().unwrap();
file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
.expect("Error when seeking!");
assert_eq!(file.read(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
let mut file = self.0.lock().unwrap();
file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
.expect("Error when seeking!");
assert_eq!(file.write(buf).unwrap(), BLOCK_SZ, "Not a complete block!");
}
fn handle_irq(&self) { unimplemented!(); }
}
fn main() {
easy_fs_pack().expect("Error when packing easy-fs!");
}
fn easy_fs_pack() -> std::io::Result<()> {
let matches = App::new("EasyFileSystem packer")
.arg(
Arg::with_name("source")
.short("s")
.long("source")
.takes_value(true)
.help("Executable source dir(with backslash)"),
)
.arg(
Arg::with_name("target")
.short("t")
.long("target")
.takes_value(true)
.help("Executable target dir(with backslash)"),
)
.get_matches();
let src_path = matches.value_of("source").unwrap();
let target_path = matches.value_of("target").unwrap();
println!("src_path = {}\ntarget_path = {}", src_path, target_path);
let block_file = Arc::new(BlockFile(Mutex::new({
let f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(format!("{}{}", target_path, "fs.img"))?;
f.set_len(16 * 2048 * 512).unwrap();
f
})));
// 16MiB, at most 4095 files
let efs = EasyFileSystem::create(block_file, 16 * 2048, 1);
let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
let apps: Vec<_> = read_dir(src_path)
.unwrap()
.into_iter()
.map(|dir_entry| {
let mut name_with_ext = dir_entry.unwrap().file_name().into_string().unwrap();
name_with_ext.drain(name_with_ext.find('.').unwrap()..name_with_ext.len());
name_with_ext
})
.collect();
for app in apps {
// load app data from host file system
let mut host_file = File::open(format!("{}{}", target_path, app)).unwrap();
let mut all_data: Vec<u8> = Vec::new();
host_file.read_to_end(&mut all_data).unwrap();
// create a file in easy-fs
let inode = root_inode.create(app.as_str()).unwrap();
// write data to easy-fs
inode.write_at(0, all_data.as_slice());
}
// list apps
for app in root_inode.ls() {
println!("{}", app);
}
Ok(())
}
#[test]
fn efs_test() -> std::io::Result<()> {
let block_file = Arc::new(BlockFile(Mutex::new({
let f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open("target/fs.img")?;
f.set_len(8192 * 512).unwrap();
f
})));
EasyFileSystem::create(block_file.clone(), 4096, 1);
let efs = EasyFileSystem::open(block_file.clone());
let root_inode = EasyFileSystem::root_inode(&efs);
root_inode.create("filea");
root_inode.create("fileb");
for name in root_inode.ls() {
println!("{}", name);
}
let filea = root_inode.find("filea").unwrap();
let greet_str = "Hello, world!";
filea.write_at(0, greet_str.as_bytes());
//let mut buffer = [0u8; 512];
let mut buffer = [0u8; 233];
let len = filea.read_at(0, &mut buffer);
assert_eq!(greet_str, core::str::from_utf8(&buffer[..len]).unwrap(),);
let mut random_str_test = |len: usize| {
filea.clear();
assert_eq!(filea.read_at(0, &mut buffer), 0,);
let mut str = String::new();
use rand;
// random digit
for _ in 0..len {
str.push(char::from('0' as u8 + rand::random::<u8>() % 10));
}
filea.write_at(0, str.as_bytes());
let mut read_buffer = [0u8; 127];
let mut offset = 0usize;
let mut read_str = String::new();
loop {
let len = filea.read_at(offset, &mut read_buffer);
if len == 0 {
break;
}
offset += len;
read_str.push_str(core::str::from_utf8(&read_buffer[..len]).unwrap());
}
assert_eq!(str, read_str);
};
random_str_test(4 * BLOCK_SZ);
random_str_test(8 * BLOCK_SZ + BLOCK_SZ / 2);
random_str_test(100 * BLOCK_SZ);
random_str_test(70 * BLOCK_SZ + BLOCK_SZ / 7);
random_str_test((12 + 128) * BLOCK_SZ);
random_str_test(400 * BLOCK_SZ);
random_str_test(1000 * BLOCK_SZ);
random_str_test(2000 * BLOCK_SZ);
Ok(())
}

@@ -0,0 +1,3 @@
.idea/
target/
Cargo.lock

@@ -0,0 +1,14 @@
[package]
name = "easy-fs"
version = "0.1.0"
authors = ["Yifan Wu <shinbokuow@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spin = "0.7.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
[profile.release]
debug = true

@@ -0,0 +1,69 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
type BitmapBlock = [u64; 64];
const BLOCK_BITS: usize = BLOCK_SZ * 8;
pub struct Bitmap {
start_block_id: usize,
blocks: usize,
}
/// Return (block_pos, bits64_pos, inner_pos)
fn decomposition(mut bit: usize) -> (usize, usize, usize) {
let block_pos = bit / BLOCK_BITS;
bit %= BLOCK_BITS;
(block_pos, bit / 64, bit % 64)
}
impl Bitmap {
pub fn new(start_block_id: usize, blocks: usize) -> Self {
Self {
start_block_id,
blocks,
}
}
pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
for block_id in 0..self.blocks {
let pos = get_block_cache(
block_id + self.start_block_id as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| (bits64_pos, bits64.trailing_ones() as usize))
{
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize)
} else {
None
}
});
if pos.is_some() {
return pos;
}
}
None
}
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
pub fn maximum(&self) -> usize {
self.blocks * BLOCK_BITS
}
}
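
A standalone sketch of the index arithmetic used by `Bitmap` above; the constants and `decomposition` are restated so the snippet runs on its own, and the concrete bit index 5000 is purely illustrative:

```rust
// Worked example: a global bit index is split into
// (bitmap block, u64 word inside that block, bit inside that word).
const BLOCK_SZ: usize = 512;
const BLOCK_BITS: usize = BLOCK_SZ * 8; // 4096 bits tracked per bitmap block

fn decomposition(mut bit: usize) -> (usize, usize, usize) {
    let block_pos = bit / BLOCK_BITS;
    bit %= BLOCK_BITS;
    (block_pos, bit / 64, bit % 64)
}

fn main() {
    // bit 5000 lives in bitmap block 1; inside that block it is bit 904,
    // i.e. u64 word 14, bit 8 of that word.
    assert_eq!(decomposition(5000), (1, 14, 8));
    println!("decomposition(5000) = {:?}", decomposition(5000));
}
```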

@@ -0,0 +1,139 @@
use super::{BlockDevice, BLOCK_SZ};
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
pub struct BlockCache {
cache: [u8; BLOCK_SZ],
block_id: usize,
block_device: Arc<dyn BlockDevice>,
modified: bool,
}
impl BlockCache {
/// Load a new BlockCache from disk.
pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
Self {
cache,
block_id,
block_device,
modified: false,
}
}
fn addr_of_offset(&self, offset: usize) -> usize {
&self.cache[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
let addr = self.addr_of_offset(offset);
unsafe { &mut *(addr as *mut T) }
}
pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
f(self.get_ref(offset))
}
pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
}
pub fn sync(&mut self) {
if self.modified {
self.modified = false;
self.block_device.write_block(self.block_id, &self.cache);
}
}
}
impl Drop for BlockCache {
fn drop(&mut self) {
self.sync()
}
}
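// at most BLOCK_CACHE_SIZE blocks stay cached; get_block_cache below evicts the first
// entry whose Arc is not referenced anywhere else (strong_count == 1)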
const BLOCK_CACHE_SIZE: usize = 16;
pub struct BlockCacheManager {
queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
}
impl BlockCacheManager {
pub fn new() -> Self {
Self {
queue: VecDeque::new(),
}
}
pub fn get_block_cache(
&mut self,
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// from front to tail
if let Some((idx, _)) = self
.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1)
{
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(BlockCache::new(
block_id,
Arc::clone(&block_device),
)));
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
}
}
lazy_static! {
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
Mutex::new(BlockCacheManager::new());
}
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER
.lock()
.get_block_cache(block_id, block_device)
}
pub fn block_cache_sync_all() {
let manager = BLOCK_CACHE_MANAGER.lock();
for (_, cache) in manager.queue.iter() {
cache.lock().sync();
}
}
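
A crate-internal usage sketch (not part of the diff) showing how other easy-fs modules typically drive the cache through the `read`/`modify` closures; the helper name, block id and offset are illustrative assumptions:

```rust
// Hypothetical helper inside easy-fs: read a u32 at offset 0 of block 3,
// bump it through the same cached block, then flush all dirty caches.
use super::{block_cache_sync_all, get_block_cache, BlockDevice};
use alloc::sync::Arc;

fn bump_counter(dev: &Arc<dyn BlockDevice>) -> u32 {
    let old: u32 = get_block_cache(3, Arc::clone(dev))
        .lock()
        .read(0, |v: &u32| *v);
    get_block_cache(3, Arc::clone(dev))
        .lock()
        .modify(0, |v: &mut u32| *v = old.wrapping_add(1));
    block_cache_sync_all(); // write every modified cached block back to the device
    old
}
```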

@@ -0,0 +1,7 @@
use core::any::Any;
pub trait BlockDevice: Send + Sync + Any {
fn read_block(&self, block_id: usize, buf: &mut [u8]);
fn write_block(&self, block_id: usize, buf: &[u8]);
fn handle_irq(&self);
}
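
A minimal in-memory implementation sketch of this trait (a hypothetical `RamDisk`, not part of the repository; `BlockFile` in easy-fs-fuse above is the file-backed analogue). The trait is restated so the snippet builds as a standalone host program:

```rust
use core::any::Any;
use std::sync::Mutex;

const BLOCK_SZ: usize = 512;

// Restated from the trait above for a self-contained sketch.
pub trait BlockDevice: Send + Sync + Any {
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    fn write_block(&self, block_id: usize, buf: &[u8]);
    fn handle_irq(&self);
}

/// Hypothetical RAM-backed block device: a Vec of 512-byte blocks behind a Mutex.
struct RamDisk(Mutex<Vec<[u8; BLOCK_SZ]>>);

impl RamDisk {
    fn new(blocks: usize) -> Self {
        Self(Mutex::new(vec![[0u8; BLOCK_SZ]; blocks]))
    }
}

impl BlockDevice for RamDisk {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        buf.copy_from_slice(&self.0.lock().unwrap()[block_id]);
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        self.0.lock().unwrap()[block_id].copy_from_slice(buf);
    }
    fn handle_irq(&self) {
        // a RAM disk never raises interrupts
    }
}

fn main() {
    let disk = RamDisk::new(16);
    let mut block = [0u8; BLOCK_SZ];
    block[..5].copy_from_slice(b"hello");
    disk.write_block(3, &block);

    let mut readback = [0u8; BLOCK_SZ];
    disk.read_block(3, &mut readback);
    assert_eq!(&readback[..5], b"hello");
    println!("RamDisk round trip ok");
}
```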

@@ -0,0 +1,147 @@
use super::{
block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
SuperBlock,
};
use crate::BLOCK_SZ;
use alloc::sync::Arc;
use spin::Mutex;
pub struct EasyFileSystem {
pub block_device: Arc<dyn BlockDevice>,
pub inode_bitmap: Bitmap,
pub data_bitmap: Bitmap,
inode_area_start_block: u32,
data_area_start_block: u32,
}
type DataBlock = [u8; BLOCK_SZ];
impl EasyFileSystem {
pub fn create(
block_device: Arc<dyn BlockDevice>,
total_blocks: u32,
inode_bitmap_blocks: u32,
) -> Arc<Mutex<Self>> {
// calculate block size of areas & create bitmaps
let inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
let inode_num = inode_bitmap.maximum();
let inode_area_blocks =
((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
let data_total_blocks = total_blocks - 1 - inode_total_blocks;
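// each data-bitmap block indexes BLOCK_BITS = 4096 data blocks, so every group of 4097
// remaining blocks (1 bitmap block + 4096 data blocks) needs one bitmap block (ceiling division)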
let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
let data_area_blocks = data_total_blocks - data_bitmap_blocks;
let data_bitmap = Bitmap::new(
(1 + inode_bitmap_blocks + inode_area_blocks) as usize,
data_bitmap_blocks as usize,
);
let mut efs = Self {
block_device: Arc::clone(&block_device),
inode_bitmap,
data_bitmap,
inode_area_start_block: 1 + inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + data_bitmap_blocks,
};
// clear all blocks
for i in 0..total_blocks {
get_block_cache(i as usize, Arc::clone(&block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() {
*byte = 0;
}
});
}
// initialize SuperBlock
get_block_cache(0, Arc::clone(&block_device)).lock().modify(
0,
|super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
},
);
// write back immediately
// create an inode for the root node "/"
assert_eq!(efs.alloc_inode(), 0);
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
// read SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.read(0, |super_block: &SuperBlock| {
assert!(super_block.is_valid(), "Error loading EFS!");
let inode_total_blocks =
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
),
inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
};
Arc::new(Mutex::new(efs))
})
}
pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
let block_device = Arc::clone(&efs.lock().block_device);
// acquire efs lock temporarily
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
// release efs lock
Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
}
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
(
block_id,
(inode_id % inodes_per_block) as usize * inode_size,
)
}
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
self.data_area_start_block + data_block_id
}
pub fn alloc_inode(&mut self) -> u32 {
self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
}
/// Return a global block ID, not an index inside the data area.
pub fn alloc_data(&mut self) -> u32 {
self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
}
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| {
*p = 0;
})
});
self.data_bitmap.dealloc(
&self.block_device,
(block_id - self.data_area_start_block) as usize,
)
}
}
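
A standalone sketch of the `get_disk_inode_pos` arithmetic above, assuming the 128-byte `DiskInode` this version uses (see the "DiskInode sz->128bytes" commit in the list); the start block and inode id are illustrative:

```rust
const BLOCK_SZ: usize = 512;
const INODE_SZ: usize = 128; // assumed size of DiskInode in this version

fn disk_inode_pos(inode_area_start_block: u32, inode_id: u32) -> (u32, usize) {
    let inodes_per_block = (BLOCK_SZ / INODE_SZ) as u32; // = 4 inodes per block
    (
        inode_area_start_block + inode_id / inodes_per_block,
        (inode_id % inodes_per_block) as usize * INODE_SZ,
    )
}

fn main() {
    // with the inode area starting at block 2: inode 6 -> block 3, byte offset 256
    assert_eq!(disk_inode_pos(2, 6), (3, 256));
}
```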

@@ -0,0 +1,409 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
const EFS_MAGIC: u32 = 0x3b800001;
const INODE_DIRECT_COUNT: usize = 28;
const NAME_LENGTH_LIMIT: usize = 27;
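// each indirect index block holds BLOCK_SZ / 4 = 128 u32 block numbers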
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4;
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT;
const DIRECT_BOUND: usize = INODE_DIRECT_COUNT;
const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
#[allow(unused)]
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
#[repr(C)]
pub struct SuperBlock {
magic: u32,
pub total_blocks: u32,
pub inode_bitmap_blocks: u32,
pub inode_area_blocks: u32,
pub data_bitmap_blocks: u32,
pub data_area_blocks: u32,
}
impl Debug for SuperBlock {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("SuperBlock")
.field("total_blocks", &self.total_blocks)
.field("inode_bitmap_blocks", &self.inode_bitmap_blocks)
.field("inode_area_blocks", &self.inode_area_blocks)
.field("data_bitmap_blocks", &self.data_bitmap_blocks)
.field("data_area_blocks", &self.data_area_blocks)
.finish()
}
}
impl SuperBlock {
pub fn initialize(
&mut self,
total_blocks: u32,
inode_bitmap_blocks: u32,
inode_area_blocks: u32,
data_bitmap_blocks: u32,
data_area_blocks: u32,
) {
*self = Self {
magic: EFS_MAGIC,
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
}
}
pub fn is_valid(&self) -> bool {
self.magic == EFS_MAGIC
}
}
#[derive(PartialEq)]
pub enum DiskInodeType {
File,
Directory,
}
type IndirectBlock = [u32; BLOCK_SZ / 4];
type DataBlock = [u8; BLOCK_SZ];
#[repr(C)]
pub struct DiskInode {
pub size: u32,
pub direct: [u32; INODE_DIRECT_COUNT],
pub indirect1: u32,
pub indirect2: u32,
type_: DiskInodeType,
}
impl DiskInode {
/// The indirect1 and indirect2 blocks are allocated only when they are needed.
pub fn initialize(&mut self, type_: DiskInodeType) {
self.size = 0;
self.direct.iter_mut().for_each(|v| *v = 0);
self.indirect1 = 0;
self.indirect2 = 0;
self.type_ = type_;
}
pub fn is_dir(&self) -> bool {
self.type_ == DiskInodeType::Directory
}
#[allow(unused)]
pub fn is_file(&self) -> bool {
self.type_ == DiskInodeType::File
}
/// Return the number of data blocks corresponding to `size`.
pub fn data_blocks(&self) -> u32 {
Self::_data_blocks(self.size)
}
fn _data_blocks(size: u32) -> u32 {
(size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
}
/// Return the total number of blocks needed, including the indirect1/2 index blocks.
pub fn total_blocks(size: u32) -> u32 {
let data_blocks = Self::_data_blocks(size) as usize;
let mut total = data_blocks as usize;
// indirect1
if data_blocks > INODE_DIRECT_COUNT {
total += 1;
}
// indirect2
if data_blocks > INDIRECT1_BOUND {
total += 1;
// sub indirect1
total +=
(data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
}
total as u32
}
pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
assert!(new_size >= self.size);
Self::total_blocks(new_size) - Self::total_blocks(self.size)
}
pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
let inner_id = inner_id as usize;
if inner_id < INODE_DIRECT_COUNT {
self.direct[inner_id]
} else if inner_id < INDIRECT1_BOUND {
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect_block: &IndirectBlock| {
indirect_block[inner_id - INODE_DIRECT_COUNT]
})
} else {
let last = inner_id - INDIRECT1_BOUND;
let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
}
}
pub fn increase_size(
&mut self,
new_size: u32,
new_blocks: Vec<u32>,
block_device: &Arc<dyn BlockDevice>,
) {
let mut current_blocks = self.data_blocks();
self.size = new_size;
let mut total_blocks = self.data_blocks();
let mut new_blocks = new_blocks.into_iter();
// fill direct
while current_blocks < total_blocks.min(INODE_DIRECT_COUNT as u32) {
self.direct[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
// alloc indirect1
if total_blocks > INODE_DIRECT_COUNT as u32 {
if current_blocks == INODE_DIRECT_COUNT as u32 {
self.indirect1 = new_blocks.next().unwrap();
}
current_blocks -= INODE_DIRECT_COUNT as u32;
total_blocks -= INODE_DIRECT_COUNT as u32;
} else {
return;
}
// fill indirect1
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
// alloc indirect2
if total_blocks > INODE_INDIRECT1_COUNT as u32 {
if current_blocks == INODE_INDIRECT1_COUNT as u32 {
self.indirect2 = new_blocks.next().unwrap();
}
current_blocks -= INODE_INDIRECT1_COUNT as u32;
total_blocks -= INODE_INDIRECT1_COUNT as u32;
} else {
return;
}
// fill indirect2 from (a0, b0) -> (a1, b1)
let mut a0 = current_blocks as usize / INODE_INDIRECT1_COUNT;
let mut b0 = current_blocks as usize % INODE_INDIRECT1_COUNT;
let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
// alloc low-level indirect1
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
}
}
});
}
/// Clear size to zero and return blocks that should be deallocated.
///
/// We will clear the block contents to zero later.
pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
let mut v: Vec<u32> = Vec::new();
let mut data_blocks = self.data_blocks() as usize;
self.size = 0;
let mut current_blocks = 0usize;
// direct
while current_blocks < data_blocks.min(INODE_DIRECT_COUNT) {
v.push(self.direct[current_blocks]);
self.direct[current_blocks] = 0;
current_blocks += 1;
}
// indirect1 block
if data_blocks > INODE_DIRECT_COUNT {
v.push(self.indirect1);
data_blocks -= INODE_DIRECT_COUNT;
current_blocks = 0;
} else {
return v;
}
// indirect1
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
self.indirect1 = 0;
// indirect2 block
if data_blocks > INODE_INDIRECT1_COUNT {
v.push(self.indirect2);
data_blocks -= INODE_INDIRECT1_COUNT;
} else {
return v;
}
// indirect2
assert!(data_blocks <= INODE_INDIRECT2_COUNT);
let a1 = data_blocks / INODE_INDIRECT1_COUNT;
let b1 = data_blocks % INODE_INDIRECT1_COUNT;
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
//indirect2[a1] = 0;
}
});
self.indirect2 = 0;
v
}
pub fn read_at(
&self,
offset: usize,
buf: &mut [u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
if start >= end {
return 0;
}
let mut start_block = start / BLOCK_SZ;
let mut read_size = 0usize;
loop {
// calculate end of current block
let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
end_current_block = end_current_block.min(end);
// read and update read size
let block_read_size = end_current_block - start;
let dst = &mut buf[read_size..read_size + block_read_size];
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device),
)
.lock()
.read(0, |data_block: &DataBlock| {
let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
dst.copy_from_slice(src);
});
read_size += block_read_size;
// move to next block
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
read_size
}
/// File size must be adjusted before.
pub fn write_at(
&mut self,
offset: usize,
buf: &[u8],
block_device: &Arc<dyn BlockDevice>,
) -> usize {
let mut start = offset;
let end = (offset + buf.len()).min(self.size as usize);
assert!(start <= end);
let mut start_block = start / BLOCK_SZ;
let mut write_size = 0usize;
loop {
// calculate end of current block
let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
end_current_block = end_current_block.min(end);
// write and update write size
let block_write_size = end_current_block - start;
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
let src = &buf[write_size..write_size + block_write_size];
let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
dst.copy_from_slice(src);
});
write_size += block_write_size;
// move to next block
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
write_size
}
}
#[repr(C)]
pub struct DirEntry {
name: [u8; NAME_LENGTH_LIMIT + 1],
inode_number: u32,
}
pub const DIRENT_SZ: usize = 32;
impl DirEntry {
pub fn empty() -> Self {
Self {
name: [0u8; NAME_LENGTH_LIMIT + 1],
inode_number: 0,
}
}
pub fn new(name: &str, inode_number: u32) -> Self {
let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
bytes[..name.len()].copy_from_slice(name.as_bytes());
Self {
name: bytes,
inode_number,
}
}
pub fn as_bytes(&self) -> &[u8] {
unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
}
pub fn name(&self) -> &str {
let len = (0usize..).find(|i| self.name[*i] == 0).unwrap();
core::str::from_utf8(&self.name[..len]).unwrap()
}
pub fn inode_number(&self) -> u32 {
self.inode_number
}
}
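
A standalone check of the `total_blocks` accounting above (constants restated; the 100 KiB and 3 KiB sizes are only illustrative):

```rust
// data blocks + one indirect1 block + one indirect2 block
// + the level-1 index blocks hanging off indirect2.
const BLOCK_SZ: u32 = 512;
const INODE_DIRECT_COUNT: u32 = 28;
const INODE_INDIRECT1_COUNT: u32 = BLOCK_SZ / 4; // 128 u32 indices per block
const INDIRECT1_BOUND: u32 = INODE_DIRECT_COUNT + INODE_INDIRECT1_COUNT; // 156

fn total_blocks(size: u32) -> u32 {
    let data_blocks = (size + BLOCK_SZ - 1) / BLOCK_SZ;
    let mut total = data_blocks;
    if data_blocks > INODE_DIRECT_COUNT {
        total += 1; // the indirect1 block itself
    }
    if data_blocks > INDIRECT1_BOUND {
        total += 1; // the indirect2 block itself
        total += (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
    }
    total
}

fn main() {
    // a 100 KiB file: 200 data blocks + indirect1 + indirect2 + one level-1 block under indirect2
    assert_eq!(total_blocks(100 * 1024), 203);
    // a 3 KiB file fits entirely in the 28 direct slots: 6 data blocks, no index blocks
    assert_eq!(total_blocks(3 * 1024), 6);
}
```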

@@ -0,0 +1,18 @@
#![no_std]
extern crate alloc;
mod bitmap;
mod block_cache;
mod block_dev;
mod efs;
mod layout;
mod vfs;
pub const BLOCK_SZ: usize = 512;
use bitmap::Bitmap;
use block_cache::{block_cache_sync_all, get_block_cache};
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
use layout::*;
pub use vfs::Inode;

@@ -0,0 +1,186 @@
use super::{
block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
EasyFileSystem, DIRENT_SZ,
};
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
pub struct Inode {
block_id: usize,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
}
impl Inode {
/// We should not acquire efs lock here.
pub fn new(
block_id: u32,
block_offset: usize,
fs: Arc<Mutex<EasyFileSystem>>,
block_device: Arc<dyn BlockDevice>,
) -> Self {
Self {
block_id: block_id as usize,
block_offset,
fs,
block_device,
}
}
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.read(self.block_offset, f)
}
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.modify(self.block_offset, f)
}
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent = DirEntry::empty();
for i in 0..file_count {
assert_eq!(
disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
if dirent.name() == name {
return Some(dirent.inode_number() as u32);
}
}
None
}
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode).map(|inode_id| {
let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
))
})
})
}
fn increase_size(
&self,
new_size: u32,
disk_inode: &mut DiskInode,
fs: &mut MutexGuard<EasyFileSystem>,
) {
if new_size < disk_inode.size {
return;
}
let blocks_needed = disk_inode.blocks_num_needed(new_size);
let mut v: Vec<u32> = Vec::new();
for _ in 0..blocks_needed {
v.push(fs.alloc_data());
}
disk_inode.increase_size(new_size, v, &self.block_device);
}
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
let mut fs = self.fs.lock();
let op = |root_inode: &mut DiskInode| {
// assert it is a directory
assert!(root_inode.is_dir());
// has the file been created?
self.find_inode_id(name, root_inode)
};
if self.modify_disk_inode(op).is_some() {
return None;
}
// create a new file
// alloc a inode with an indirect block
let new_inode_id = fs.alloc_inode();
// initialize inode
let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
let new_size = (file_count + 1) * DIRENT_SZ;
// increase size
self.increase_size(new_size as u32, root_inode, &mut fs);
// write dirent
let dirent = DirEntry::new(name, new_inode_id);
root_inode.write_at(
file_count * DIRENT_SZ,
dirent.as_bytes(),
&self.block_device,
);
});
let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
block_cache_sync_all();
// return inode
Some(Arc::new(Self::new(
block_id,
block_offset,
self.fs.clone(),
self.block_device.clone(),
)))
// release efs lock automatically by compiler
}
pub fn ls(&self) -> Vec<String> {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut v: Vec<String> = Vec::new();
for i in 0..file_count {
let mut dirent = DirEntry::empty();
assert_eq!(
disk_inode.read_at(i * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
v.push(String::from(dirent.name()));
}
v
})
}
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| disk_inode.read_at(offset, buf, &self.block_device))
}
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
let mut fs = self.fs.lock();
let size = self.modify_disk_inode(|disk_inode| {
self.increase_size((offset + buf.len()) as u32, disk_inode, &mut fs);
disk_inode.write_at(offset, buf, &self.block_device)
});
block_cache_sync_all();
size
}
pub fn clear(&self) {
let mut fs = self.fs.lock();
self.modify_disk_inode(|disk_inode| {
let size = disk_inode.size;
let data_blocks_dealloc = disk_inode.clear_size(&self.block_device);
assert!(data_blocks_dealloc.len() == DiskInode::total_blocks(size) as usize);
for data_block in data_blocks_dealloc.into_iter() {
fs.dealloc_data(data_block);
}
});
block_cache_sync_all();
}
}
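// Illustrative usage sketch, not part of the original file: how a client such
// as os/src/fs/inode.rs below drives this interface. It assumes `block_device`
// wraps an image that was already formatted as an EasyFileSystem.
#[allow(unused)]
fn vfs_usage_demo(block_device: Arc<dyn BlockDevice>) {
    let efs = EasyFileSystem::open(block_device);
    let root = EasyFileSystem::root_inode(&efs);
    if let Some(file) = root.create("hello.txt") {
        assert_eq!(file.write_at(0, b"hello"), 5);
        let mut buf = [0u8; 5];
        assert_eq!(file.read_at(0, &mut buf), 5);
        assert_eq!(&buf, b"hello");
    }
    assert!(root.ls().contains(&String::from("hello.txt")));
}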

@ -10,10 +10,18 @@ edition = "2018"
riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
buddy_system_allocator = "0.6"
spin = "0.7.0"
bitflags = "1.2.1"
xmas-elf = "0.7.0"
volatile = "0.3"
virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" }
k210-pac = { git = "https://github.com/wyfcyx/k210-pac" }
k210-hal = { git = "https://github.com/wyfcyx/k210-hal" }
k210-soc = { git = "https://github.com/wyfcyx/k210-soc" }
easy-fs = { path = "../easy-fs" }
[features]
board_qemu = []
board_k210 = []
[profile.release]
debug = true

@ -3,17 +3,32 @@ TARGET := riscv64gc-unknown-none-elf
MODE := release
KERNEL_ELF := target/$(TARGET)/$(MODE)/os
KERNEL_BIN := $(KERNEL_ELF).bin
KERNEL_ENTRY_PA := 0x80020000
DISASM_TMP := target/$(TARGET)/$(MODE)/asm
FS_IMG := ../user/target/$(TARGET)/$(MODE)/fs.img
SDCARD := /dev/sdb
APPS := ../user/src/bin/*
# BOARD
BOARD ?= qemu
SBI ?= rustsbi
BOOTLOADER := ../bootloader/$(SBI)-$(BOARD).bin
K210_BOOTLOADER_SIZE := 131072
# Building mode argument
ifeq ($(MODE), release)
MODE_ARG := --release
endif
# KERNEL ENTRY
ifeq ($(BOARD), qemu)
KERNEL_ENTRY_PA := 0x80200000
else ifeq ($(BOARD), k210)
KERNEL_ENTRY_PA := 0x80020000
endif
# Run K210
K210-SERIALPORT = /dev/ttyUSB0
K210-BURNER = ../tools/kflash.py
# Binutils
OBJDUMP := rust-objdump --arch-name=riscv64
@ -22,14 +37,44 @@ OBJCOPY := rust-objcopy --binary-architecture=riscv64
# Disassembly
DISASM ?= -x
build: $(KERNEL_BIN)
# Run usertests or usershell
TEST ?=
build: env switch-check $(KERNEL_BIN) fs-img
switch-check:
ifeq ($(BOARD), qemu)
(which last-qemu) || (rm -f last-k210 && touch last-qemu && make clean)
else ifeq ($(BOARD), k210)
(which last-k210) || (rm -f last-qemu && touch last-k210 && make clean)
endif
env:
(rustup target list | grep "riscv64gc-unknown-none-elf (installed)") || rustup target add $(TARGET)
cargo install cargo-binutils --vers =0.3.3
rustup component add rust-src
rustup component add llvm-tools-preview
sdcard: fs-img
@echo "Are you sure write to $(SDCARD) ? [y/N] " && read ans && [ $${ans:-N} = y ]
@sudo dd if=/dev/zero of=$(SDCARD) bs=1048576 count=32
@sudo dd if=$(FS_IMG) of=$(SDCARD)
$(KERNEL_BIN): kernel
@$(OBJCOPY) $(KERNEL_ELF) --strip-all -O binary $@
fs-img: $(APPS)
@cd ../user && make build TEST=$(TEST)
@rm -f $(FS_IMG)
@cd ../easy-fs-fuse && cargo run --release -- -s ../user/src/bin/ -t ../user/target/riscv64gc-unknown-none-elf/release/
$(APPS):
kernel:
@cd ../user && make build
@echo Platform: $(BOARD)
@cp src/linker-$(BOARD).ld src/linker.ld
@cargo build --release --features "board_$(BOARD)"
@rm src/linker.ld
clean:
@cargo clean
@ -39,7 +84,7 @@ disasm: kernel
disasm-vim: kernel
@$(OBJDUMP) $(DISASM) $(KERNEL_ELF) > $(DISASM_TMP)
@vim $(DISASM_TMP)
@nvim $(DISASM_TMP)
@rm $(DISASM_TMP)
run: run-inner
@ -50,14 +95,17 @@ ifeq ($(BOARD),qemu)
-machine virt \
-nographic \
-bios $(BOOTLOADER) \
-device loader,file=$(KERNEL_BIN),addr=$(KERNEL_ENTRY_PA)
-device loader,file=$(KERNEL_BIN),addr=$(KERNEL_ENTRY_PA) \
-drive file=$(FS_IMG),if=none,format=raw,id=x0 \
-device virtio-blk-device,drive=x0,bus=virtio-mmio-bus.0
else
(which $(K210-BURNER)) || (cd .. && git clone https://github.com/sipeed/kflash.py.git && mv kflash.py tools)
@cp $(BOOTLOADER) $(BOOTLOADER).copy
@dd if=$(KERNEL_BIN) of=$(BOOTLOADER).copy bs=128K seek=1
@dd if=$(KERNEL_BIN) of=$(BOOTLOADER).copy bs=$(K210_BOOTLOADER_SIZE) seek=1
@mv $(BOOTLOADER).copy $(KERNEL_BIN)
@sudo chmod 777 $(K210-SERIALPORT)
python3 $(K210-BURNER) -p $(K210-SERIALPORT) -b 1500000 $(KERNEL_BIN)
miniterm --eol LF --dtr 0 --rts 0 --filter direct $(K210-SERIALPORT) 115200
python3 -m serial.tools.miniterm --eol LF --dtr 0 --rts 0 --filter direct $(K210-SERIALPORT) 115200
endif
debug: build
@ -66,4 +114,11 @@ debug: build
tmux split-window -h "riscv64-unknown-elf-gdb -ex 'file $(KERNEL_ELF)' -ex 'set arch riscv:rv64' -ex 'target remote localhost:1234'" && \
tmux -2 attach-session -d
.PHONY: build kernel clean disasm disasm-vim run-inner
gdbserver: build
@qemu-system-riscv64 -machine virt -nographic -bios $(BOOTLOADER) -device loader,file=$(KERNEL_BIN),addr=$(KERNEL_ENTRY_PA) -s -S
gdbclient:
@riscv64-unknown-elf-gdb -ex 'file $(KERNEL_ELF)' -ex 'set arch riscv:rv64' -ex 'target remote localhost:1234'
.PHONY: build env kernel clean disasm disasm-vim run-inner switch-check fs-img gdbserver gdbclient
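# Usage note (illustrative, not part of the original Makefile): `make run BOARD=qemu`
# rebuilds the user apps, packs them into $(FS_IMG) with easy-fs-fuse, and boots the
# kernel with that image attached as a virtio-blk device, while `make run BOARD=k210`
# appends the kernel after the $(K210_BOOTLOADER_SIZE)-byte bootloader and burns the
# result to the board over $(K210-SERIALPORT).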

@ -1,56 +1,6 @@
use std::io::{Result, Write};
use std::fs::{File, read_dir};
static TARGET_PATH: &str = "../user/target/riscv64gc-unknown-none-elf/release/";
fn main() {
println!("cargo:rerun-if-changed=../user/src/");
println!("cargo:rerun-if-changed={}", TARGET_PATH);
insert_app_data().unwrap();
}
fn insert_app_data() -> Result<()> {
let mut f = File::create("src/link_app.S").unwrap();
let mut apps: Vec<_> = read_dir("../user/src/bin")
.unwrap()
.into_iter()
.map(|dir_entry| {
let mut name_with_ext = dir_entry.unwrap().file_name().into_string().unwrap();
name_with_ext.drain(name_with_ext.find('.').unwrap()..name_with_ext.len());
name_with_ext
})
.collect();
apps.sort();
writeln!(f, r#"
.align 4
.section .data
.global _num_app
_num_app:
.quad {}"#, apps.len())?;
for i in 0..apps.len() {
writeln!(f, r#" .quad app_{}_start"#, i)?;
}
writeln!(f, r#" .quad app_{}_end"#, apps.len() - 1)?;
writeln!(f, r#"
.global _app_names
_app_names:"#)?;
for app in apps.iter() {
writeln!(f, r#" .string "{}\n""#, app)?;
}
for (idx, app) in apps.iter().enumerate() {
println!("app_{}: {}", idx, app);
writeln!(f, r#"
.section .data
.global app_{0}_start
.global app_{0}_end
.align 12
app_{0}_start:
.incbin "{2}{1}"
app_{0}_end:"#, idx, app, TARGET_PATH)?;
}
Ok(())
}

@ -0,0 +1,30 @@
pub const CLOCK_FREQ: usize = 403000000 / 62;
pub const MMIO: &[(usize, usize)] = &[
// we don't need the CLINT in S-mode when running;
// we only need claim/complete for target0 after initialization
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];
pub type BlockDeviceImpl = crate::drivers::block::SDCardWrapper;
pub fn device_init() {
unimplemented!();
}
pub fn irq_handler() {
unimplemented!();
}

@ -0,0 +1,126 @@
pub const CLOCK_FREQ: usize = 12500000;
pub const MMIO: &[(usize, usize)] = &[
(0x1000_0000, 0x1000), // VIRT_UART0 in virt machine
(0x1000_1000, 0x1000), // VIRT_VIRTIO in virt machine
(0x0C00_0000, 0x40_0000), // VIRT_PLIC in virt machine
(0x0010_0000, 0x00_2000), // VIRT_TEST/RTC in virt machine
];
pub type BlockDeviceImpl = crate::drivers::block::VirtIOBlock;
pub type CharDeviceImpl = crate::drivers::chardev::NS16550a<VIRT_UART>;
pub const VIRT_PLIC: usize = 0xC00_0000;
pub const VIRT_UART: usize = 0x1000_0000;
use crate::drivers::block::BLOCK_DEVICE;
use crate::drivers::chardev::{CharDevice, UART};
use crate::drivers::plic::{IntrTargetPriority, PLIC};
pub fn device_init() {
use riscv::register::sie;
let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
let hart_id: usize = 0;
let supervisor = IntrTargetPriority::Supervisor;
let machine = IntrTargetPriority::Machine;
plic.set_threshold(hart_id, supervisor, 0);
plic.set_threshold(hart_id, machine, 1);
for intr_src_id in [1usize, 10] {
plic.enable(hart_id, supervisor, intr_src_id);
plic.set_priority(intr_src_id, 1);
}
unsafe {
sie::set_sext();
}
}
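// Note (added for clarity): source 1 is the virtio-blk device and source 10 is
// UART0, matching the dispatch in irq_handler below. The PLIC only forwards
// interrupts whose priority is strictly greater than the target threshold, so
// threshold 0 lets S-mode receive both sources while threshold 1 masks them
// from M-mode.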
pub fn irq_handler() {
let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
let intr_src_id = plic.claim(0, IntrTargetPriority::Supervisor);
match intr_src_id {
1 => BLOCK_DEVICE.handle_irq(),
10 => UART.handle_irq(),
_ => panic!("unsupported IRQ {}", intr_src_id),
}
plic.complete(0, IntrTargetPriority::Supervisor, intr_src_id);
}
// Ref: https://github.com/andre-richter/qemu-exit
use core::arch::asm;
const EXIT_SUCCESS: u32 = 0x5555; // Equals `exit(0)`. qemu successful exit
const EXIT_FAILURE_FLAG: u32 = 0x3333;
const EXIT_FAILURE: u32 = exit_code_encode(1); // Equals `exit(1)`. qemu failed exit
const EXIT_RESET: u32 = 0x7777; // qemu reset
pub trait QEMUExit {
/// Exit with specified return code.
///
/// Note: For `X86`, code is binary-OR'ed with `0x1` inside QEMU.
fn exit(&self, code: u32) -> !;
/// Exit QEMU using `EXIT_SUCCESS`, aka `0`, if possible.
///
/// Note: Not possible for `X86`.
fn exit_success(&self) -> !;
/// Exit QEMU using `EXIT_FAILURE`, aka `1`.
fn exit_failure(&self) -> !;
}
/// RISCV64 configuration
pub struct RISCV64 {
/// Address of the sifive_test mapped device.
addr: u64,
}
/// Encode the exit code using EXIT_FAILURE_FLAG.
const fn exit_code_encode(code: u32) -> u32 {
(code << 16) | EXIT_FAILURE_FLAG
}
impl RISCV64 {
/// Create an instance.
pub const fn new(addr: u64) -> Self {
RISCV64 { addr }
}
}
impl QEMUExit for RISCV64 {
/// Exit qemu with specified exit code.
fn exit(&self, code: u32) -> ! {
// If code is not a special value, we need to encode it with EXIT_FAILURE_FLAG.
let code_new = match code {
EXIT_SUCCESS | EXIT_FAILURE | EXIT_RESET => code,
_ => exit_code_encode(code),
};
unsafe {
asm!(
"sw {0}, 0({1})",
in(reg)code_new, in(reg)self.addr
);
// If the QEMU exit attempt did not work, transition into an infinite
// loop. Calling `panic!()` here is not an option, since there is a good chance
// this function is the last expression in the `panic!()` handler
// itself, which would lead to infinite recursion.
loop {
asm!("wfi", options(nomem, nostack));
}
}
}
fn exit_success(&self) -> ! {
self.exit(EXIT_SUCCESS);
}
fn exit_failure(&self) -> ! {
self.exit(EXIT_FAILURE);
}
}
const VIRT_TEST: u64 = 0x100000;
pub const QEMU_EXIT_HANDLE: RISCV64 = RISCV64::new(VIRT_TEST);
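// Worked example of the encoding above (illustrative): QEMU_EXIT_HANDLE.exit(255)
// writes exit_code_encode(255) = (255 << 16) | 0x3333 = 0x00FF_3333 to the
// sifive_test device at VIRT_TEST (0x10_0000, mapped via the MMIO table above),
// which makes qemu terminate with a non-zero status; exit_success() writes
// 0x5555 instead. This is presumably what shutdown(255) in lang_items.rs below
// relies on for the CI autotest exit codes.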

@ -1,15 +1,13 @@
#[allow(unused)]
pub const USER_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_STACK_SIZE: usize = 4096 * 2;
pub const KERNEL_HEAP_SIZE: usize = 0x20_0000;
pub const MEMORY_END: usize = 0x80600000;
pub const MEMORY_END: usize = 0x80800000;
pub const PAGE_SIZE: usize = 0x1000;
pub const PAGE_SIZE_BITS: usize = 0xc;
pub const TRAMPOLINE: usize = usize::MAX - PAGE_SIZE + 1;
pub const TRAP_CONTEXT: usize = TRAMPOLINE - PAGE_SIZE;
#[cfg(feature = "board_k210")]
pub const CPU_FREQ: usize = 10000000;
pub const TRAP_CONTEXT_BASE: usize = TRAMPOLINE - PAGE_SIZE;
#[cfg(feature = "board_qemu")]
pub const CPU_FREQ: usize = 12500000;
pub use crate::board::{CLOCK_FREQ, MMIO};

@ -1,12 +1,12 @@
use crate::drivers::chardev::{CharDevice, UART};
use core::fmt::{self, Write};
use crate::sbi::console_putchar;
struct Stdout;
impl Write for Stdout {
fn write_str(&mut self, s: &str) -> fmt::Result {
for c in s.chars() {
console_putchar(c as usize);
UART.write(c as u8);
}
Ok(())
}
@ -19,15 +19,13 @@ pub fn print(args: fmt::Arguments) {
#[macro_export]
macro_rules! print {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!($fmt $(, $($arg)+)?));
$crate::console::print(format_args!($fmt $(, $($arg)+)?))
}
}
#[macro_export]
macro_rules! println {
($fmt: literal $(, $($arg: tt)+)?) => {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
}
}

@ -0,0 +1,30 @@
mod sdcard;
mod virtio_blk;
pub use sdcard::SDCardWrapper;
pub use virtio_blk::VirtIOBlock;
use crate::board::BlockDeviceImpl;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
use lazy_static::*;
lazy_static! {
pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = Arc::new(BlockDeviceImpl::new());
}
#[allow(unused)]
pub fn block_device_test() {
let block_device = BLOCK_DEVICE.clone();
let mut write_buffer = [0u8; 512];
let mut read_buffer = [0u8; 512];
for i in 0..512 {
for byte in write_buffer.iter_mut() {
*byte = i as u8;
}
block_device.write_block(i as usize, &write_buffer);
block_device.read_block(i as usize, &mut read_buffer);
assert_eq!(write_buffer, read_buffer);
}
println!("block device test passed!");
}

@ -0,0 +1,767 @@
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(unused)]
use super::BlockDevice;
use crate::sync::UPIntrFreeCell;
use core::convert::TryInto;
use k210_hal::prelude::*;
use k210_pac::{Peripherals, SPI0};
use k210_soc::{
fpioa::{self, io},
//dmac::{dma_channel, DMAC, DMACExt},
gpio,
gpiohs,
sleep::usleep,
spi::{aitm, frame_format, tmod, work_mode, SPIExt, SPIImpl, SPI},
sysctl,
};
use lazy_static::*;
pub struct SDCard<SPI> {
spi: SPI,
spi_cs: u32,
cs_gpionum: u8,
//dmac: &'a DMAC,
//channel: dma_channel,
}
/*
* Start Data tokens:
* Tokens (necessary because at nop/idle (and CS active) only 0xff is
* on the data/command line)
*/
/** Data token start byte, Start Single Block Read */
pub const SD_START_DATA_SINGLE_BLOCK_READ: u8 = 0xFE;
/** Data token start byte, Start Multiple Block Read */
pub const SD_START_DATA_MULTIPLE_BLOCK_READ: u8 = 0xFE;
/** Data token start byte, Start Single Block Write */
pub const SD_START_DATA_SINGLE_BLOCK_WRITE: u8 = 0xFE;
/** Data token start byte, Start Multiple Block Write */
pub const SD_START_DATA_MULTIPLE_BLOCK_WRITE: u8 = 0xFC;
pub const SEC_LEN: usize = 512;
/** SD commands */
#[repr(u8)]
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[allow(unused)]
pub enum CMD {
/** Software reset */
CMD0 = 0,
/** Check voltage range (SDC V2) */
CMD8 = 8,
/** Read CSD register */
CMD9 = 9,
/** Read CID register */
CMD10 = 10,
/** Stop to read data */
CMD12 = 12,
/** Change R/W block size */
CMD16 = 16,
/** Read block */
CMD17 = 17,
/** Read multiple blocks */
CMD18 = 18,
/** Number of blocks to erase (SDC) */
ACMD23 = 23,
/** Write a block */
CMD24 = 24,
/** Write multiple blocks */
CMD25 = 25,
/** Initiate initialization process (SDC) */
ACMD41 = 41,
/** Leading command for ACMD* */
CMD55 = 55,
/** Read OCR */
CMD58 = 58,
/** Enable/disable CRC check */
CMD59 = 59,
}
#[allow(unused)]
#[derive(Debug, Copy, Clone)]
pub enum InitError {
CMDFailed(CMD, u8),
CardCapacityStatusNotSet([u8; 4]),
CannotGetCardInfo,
}
/**
* Card Specific Data: CSD Register
*/
#[derive(Debug, Copy, Clone)]
pub struct SDCardCSD {
pub CSDStruct: u8, /* CSD structure */
pub SysSpecVersion: u8, /* System specification version */
pub Reserved1: u8, /* Reserved */
pub TAAC: u8, /* Data read access-time 1 */
pub NSAC: u8, /* Data read access-time 2 in CLK cycles */
pub MaxBusClkFrec: u8, /* Max. bus clock frequency */
pub CardComdClasses: u16, /* Card command classes */
pub RdBlockLen: u8, /* Max. read data block length */
pub PartBlockRead: u8, /* Partial blocks for read allowed */
pub WrBlockMisalign: u8, /* Write block misalignment */
pub RdBlockMisalign: u8, /* Read block misalignment */
pub DSRImpl: u8, /* DSR implemented */
pub Reserved2: u8, /* Reserved */
pub DeviceSize: u32, /* Device Size */
//MaxRdCurrentVDDMin: u8, /* Max. read current @ VDD min */
//MaxRdCurrentVDDMax: u8, /* Max. read current @ VDD max */
//MaxWrCurrentVDDMin: u8, /* Max. write current @ VDD min */
//MaxWrCurrentVDDMax: u8, /* Max. write current @ VDD max */
//DeviceSizeMul: u8, /* Device size multiplier */
pub EraseGrSize: u8, /* Erase group size */
pub EraseGrMul: u8, /* Erase group size multiplier */
pub WrProtectGrSize: u8, /* Write protect group size */
pub WrProtectGrEnable: u8, /* Write protect group enable */
pub ManDeflECC: u8, /* Manufacturer default ECC */
pub WrSpeedFact: u8, /* Write speed factor */
pub MaxWrBlockLen: u8, /* Max. write data block length */
pub WriteBlockPaPartial: u8, /* Partial blocks for write allowed */
pub Reserved3: u8, /* Reserved */
pub ContentProtectAppli: u8, /* Content protection application */
pub FileFormatGroup: u8, /* File format group */
pub CopyFlag: u8, /* Copy flag (OTP) */
pub PermWrProtect: u8, /* Permanent write protection */
pub TempWrProtect: u8, /* Temporary write protection */
pub FileFormat: u8, /* File Format */
pub ECC: u8, /* ECC code */
pub CSD_CRC: u8, /* CSD CRC */
pub Reserved4: u8, /* always 1*/
}
/**
* Card Identification Data: CID Register
*/
#[derive(Debug, Copy, Clone)]
pub struct SDCardCID {
pub ManufacturerID: u8, /* ManufacturerID */
pub OEM_AppliID: u16, /* OEM/Application ID */
pub ProdName1: u32, /* Product Name part1 */
pub ProdName2: u8, /* Product Name part2*/
pub ProdRev: u8, /* Product Revision */
pub ProdSN: u32, /* Product Serial Number */
pub Reserved1: u8, /* Reserved1 */
pub ManufactDate: u16, /* Manufacturing Date */
pub CID_CRC: u8, /* CID CRC */
pub Reserved2: u8, /* always 1 */
}
/**
* Card information
*/
#[derive(Debug, Copy, Clone)]
pub struct SDCardInfo {
pub SD_csd: SDCardCSD,
pub SD_cid: SDCardCID,
pub CardCapacity: u64, /* Card Capacity */
pub CardBlockSize: u64, /* Card Block Size */
}
impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
pub fn new(
spi: X,
spi_cs: u32,
cs_gpionum: u8, /*, dmac: &'a DMAC, channel: dma_channel*/
) -> Self {
Self {
spi,
spi_cs,
cs_gpionum,
/*
dmac,
channel,
*/
}
}
fn CS_HIGH(&self) {
gpiohs::set_pin(self.cs_gpionum, true);
}
fn CS_LOW(&self) {
gpiohs::set_pin(self.cs_gpionum, false);
}
fn HIGH_SPEED_ENABLE(&self) {
self.spi.set_clk_rate(10000000);
}
fn lowlevel_init(&self) {
gpiohs::set_direction(self.cs_gpionum, gpio::direction::OUTPUT);
self.spi.set_clk_rate(200000);
}
fn write_data(&self, data: &[u8]) {
self.spi.configure(
work_mode::MODE0,
frame_format::STANDARD,
8, /* data bits */
0, /* endian */
0, /*instruction length*/
0, /*address length*/
0, /*wait cycles*/
aitm::STANDARD,
tmod::TRANS,
);
self.spi.send_data(self.spi_cs, data);
}
/*
fn write_data_dma(&self, data: &[u32]) {
self.spi.configure(
work_mode::MODE0,
frame_format::STANDARD,
8, /* data bits */
0, /* endian */
0, /*instruction length*/
0, /*address length*/
0, /*wait cycles*/
aitm::STANDARD,
tmod::TRANS,
);
self.spi
.send_data_dma(self.dmac, self.channel, self.spi_cs, data);
}
*/
fn read_data(&self, data: &mut [u8]) {
self.spi.configure(
work_mode::MODE0,
frame_format::STANDARD,
8, /* data bits */
0, /* endian */
0, /*instruction length*/
0, /*address length*/
0, /*wait cycles*/
aitm::STANDARD,
tmod::RECV,
);
self.spi.recv_data(self.spi_cs, data);
}
/*
fn read_data_dma(&self, data: &mut [u32]) {
self.spi.configure(
work_mode::MODE0,
frame_format::STANDARD,
8, /* data bits */
0, /* endian */
0, /*instruction length*/
0, /*address length*/
0, /*wait cycles*/
aitm::STANDARD,
tmod::RECV,
);
self.spi
.recv_data_dma(self.dmac, self.channel, self.spi_cs, data);
}
*/
/*
* Send a 6-byte command frame (command index, 4 argument bytes, CRC) to the SD card.
* @param cmd: The user expected command to send to SD card.
* @param arg: The command argument.
* @param crc: The CRC.
* @retval None
*/
fn send_cmd(&self, cmd: CMD, arg: u32, crc: u8) {
/* SD chip select low */
self.CS_LOW();
/* Send the Cmd bytes */
self.write_data(&[
/* Construct byte 1 */
((cmd as u8) | 0x40),
/* Construct byte 2 */
(arg >> 24) as u8,
/* Construct byte 3 */
((arg >> 16) & 0xff) as u8,
/* Construct byte 4 */
((arg >> 8) & 0xff) as u8,
/* Construct byte 5 */
(arg & 0xff) as u8,
/* Construct CRC: byte 6 */
crc,
]);
}
/* Send end-command sequence to SD card */
fn end_cmd(&self) {
/* SD chip select high */
self.CS_HIGH();
/* Send a trailing dummy byte */
self.write_data(&[0xff]);
}
/*
* Returns the SD response.
* @param None
* @retval The SD Response:
* - 0xFF: Sequence failed
* - 0: Sequence succeed
*/
fn get_response(&self) -> u8 {
let result = &mut [0u8];
let mut timeout = 0x0FFF;
/* Check whether a response arrives before the timeout expires */
while timeout != 0 {
self.read_data(result);
/* A valid response was received */
if result[0] != 0xFF {
return result[0];
}
timeout -= 1;
}
/* Timed out */
0xFF
}
/*
* Get SD card data response.
* @param None
* @retval The SD status: Read data response xxx0<status>1
* - status 010: Data accepted
* - status 101: Data rejected due to a CRC error
* - status 110: Data rejected due to a write error
* - status 111: Data rejected due to another error
*/
fn get_dataresponse(&self) -> u8 {
let response = &mut [0u8];
/* Read response */
self.read_data(response);
/* Mask unused bits */
response[0] &= 0x1F;
if response[0] != 0x05 {
return 0xFF;
}
/* Wait while the card is busy (it keeps returning 0x00) */
self.read_data(response);
while response[0] == 0 {
self.read_data(response);
}
/* Return response */
0
}
/*
* Read the CSD card register
* Reading the contents of the CSD register in SPI mode is a simple
* read-block transaction.
* @param SD_csd: pointer to a CSD register structure
* @retval The SD Response:
* - `Err()`: Sequence failed
* - `Ok(info)`: Sequence succeed
*/
fn get_csdregister(&self) -> Result<SDCardCSD, ()> {
let mut csd_tab = [0u8; 18];
/* Send CMD9 (CSD register) */
self.send_cmd(CMD::CMD9, 0, 0);
/* Wait for response in the R1 format (0x00 is no errors) */
if self.get_response() != 0x00 {
self.end_cmd();
return Err(());
}
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
self.end_cmd();
return Err(());
}
/* Store CSD register value on csd_tab */
/* Get CRC bytes (not really needed by us, but required by SD) */
self.read_data(&mut csd_tab);
self.end_cmd();
/* see also: https://cdn-shop.adafruit.com/datasheets/TS16GUSDHC6.pdf */
Ok(SDCardCSD {
/* Byte 0 */
CSDStruct: (csd_tab[0] & 0xC0) >> 6,
SysSpecVersion: (csd_tab[0] & 0x3C) >> 2,
Reserved1: csd_tab[0] & 0x03,
/* Byte 1 */
TAAC: csd_tab[1],
/* Byte 2 */
NSAC: csd_tab[2],
/* Byte 3 */
MaxBusClkFrec: csd_tab[3],
/* Byte 4, 5 */
CardComdClasses: (u16::from(csd_tab[4]) << 4) | ((u16::from(csd_tab[5]) & 0xF0) >> 4),
/* Byte 5 */
RdBlockLen: csd_tab[5] & 0x0F,
/* Byte 6 */
PartBlockRead: (csd_tab[6] & 0x80) >> 7,
WrBlockMisalign: (csd_tab[6] & 0x40) >> 6,
RdBlockMisalign: (csd_tab[6] & 0x20) >> 5,
DSRImpl: (csd_tab[6] & 0x10) >> 4,
Reserved2: 0,
// DeviceSize: (csd_tab[6] & 0x03) << 10,
/* Byte 7, 8, 9 */
DeviceSize: ((u32::from(csd_tab[7]) & 0x3F) << 16)
| (u32::from(csd_tab[8]) << 8)
| u32::from(csd_tab[9]),
/* Byte 10 */
EraseGrSize: (csd_tab[10] & 0x40) >> 6,
/* Byte 10, 11 */
EraseGrMul: ((csd_tab[10] & 0x3F) << 1) | ((csd_tab[11] & 0x80) >> 7),
/* Byte 11 */
WrProtectGrSize: (csd_tab[11] & 0x7F),
/* Byte 12 */
WrProtectGrEnable: (csd_tab[12] & 0x80) >> 7,
ManDeflECC: (csd_tab[12] & 0x60) >> 5,
WrSpeedFact: (csd_tab[12] & 0x1C) >> 2,
/* Byte 12,13 */
MaxWrBlockLen: ((csd_tab[12] & 0x03) << 2) | ((csd_tab[13] & 0xC0) >> 6),
/* Byte 13 */
WriteBlockPaPartial: (csd_tab[13] & 0x20) >> 5,
Reserved3: 0,
ContentProtectAppli: (csd_tab[13] & 0x01),
/* Byte 14 */
FileFormatGroup: (csd_tab[14] & 0x80) >> 7,
CopyFlag: (csd_tab[14] & 0x40) >> 6,
PermWrProtect: (csd_tab[14] & 0x20) >> 5,
TempWrProtect: (csd_tab[14] & 0x10) >> 4,
FileFormat: (csd_tab[14] & 0x0C) >> 2,
ECC: (csd_tab[14] & 0x03),
/* Byte 15 */
CSD_CRC: (csd_tab[15] & 0xFE) >> 1,
Reserved4: 1,
/* Return the response */
})
}
/*
* Read the CID card register.
* Reading the contents of the CID register in SPI mode is a simple
* read-block transaction.
* @param SD_cid: pointer to a CID register structure
* @retval The SD Response:
* - `Err()`: Sequence failed
* - `Ok(info)`: Sequence succeed
*/
fn get_cidregister(&self) -> Result<SDCardCID, ()> {
let mut cid_tab = [0u8; 18];
/* Send CMD10 (CID register) */
self.send_cmd(CMD::CMD10, 0, 0);
/* Wait for response in the R1 format (0x00 is no errors) */
if self.get_response() != 0x00 {
self.end_cmd();
return Err(());
}
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
self.end_cmd();
return Err(());
}
/* Store CID register value on cid_tab */
/* Get CRC bytes (not really needed by us, but required by SD) */
self.read_data(&mut cid_tab);
self.end_cmd();
Ok(SDCardCID {
/* Byte 0 */
ManufacturerID: cid_tab[0],
/* Byte 1, 2 */
OEM_AppliID: (u16::from(cid_tab[1]) << 8) | u16::from(cid_tab[2]),
/* Byte 3, 4, 5, 6 */
ProdName1: (u32::from(cid_tab[3]) << 24)
| (u32::from(cid_tab[4]) << 16)
| (u32::from(cid_tab[5]) << 8)
| u32::from(cid_tab[6]),
/* Byte 7 */
ProdName2: cid_tab[7],
/* Byte 8 */
ProdRev: cid_tab[8],
/* Byte 9, 10, 11, 12 */
ProdSN: (u32::from(cid_tab[9]) << 24)
| (u32::from(cid_tab[10]) << 16)
| (u32::from(cid_tab[11]) << 8)
| u32::from(cid_tab[12]),
/* Byte 13, 14 */
Reserved1: (cid_tab[13] & 0xF0) >> 4,
ManufactDate: ((u16::from(cid_tab[13]) & 0x0F) << 8) | u16::from(cid_tab[14]),
/* Byte 15 */
CID_CRC: (cid_tab[15] & 0xFE) >> 1,
Reserved2: 1,
})
}
/*
* Returns information about the specific card.
* @param cardinfo: pointer to an SD_CardInfo structure that contains all SD
* card information.
* @retval The SD Response:
* - `Err(())`: Sequence failed
* - `Ok(info)`: Sequence succeed
*/
fn get_cardinfo(&self) -> Result<SDCardInfo, ()> {
let mut info = SDCardInfo {
SD_csd: self.get_csdregister()?,
SD_cid: self.get_cidregister()?,
CardCapacity: 0,
CardBlockSize: 0,
};
info.CardBlockSize = 1 << u64::from(info.SD_csd.RdBlockLen);
info.CardCapacity = (u64::from(info.SD_csd.DeviceSize) + 1) * 1024 * info.CardBlockSize;
Ok(info)
}
/*
* Initializes the SD card communication in SPI mode.
* @param None
* @retval The SD card info if initialization succeeded, otherwise Err
*/
pub fn init(&self) -> Result<SDCardInfo, InitError> {
/* Initialize SD_SPI */
self.lowlevel_init();
/* SD chip select high */
self.CS_HIGH();
/* NOTE: this reset doesn't always seem to work if the SD access was broken off in the
* middle of an operation: CMDFailed(CMD0, 127). */
/* Send dummy byte 0xFF, 10 times with CS high */
/* Raise CS and MOSI for 80 clock cycles */
/* Send dummy byte 0xFF */
self.write_data(&[0xff; 10]);
/*------------Put SD in SPI mode--------------*/
/* SD initialized and set to SPI mode properly */
/* Send software reset */
self.send_cmd(CMD::CMD0, 0, 0x95);
let result = self.get_response();
self.end_cmd();
if result != 0x01 {
return Err(InitError::CMDFailed(CMD::CMD0, result));
}
/* Check voltage range */
self.send_cmd(CMD::CMD8, 0x01AA, 0x87);
/* 0x01 or 0x05 */
let result = self.get_response();
let mut frame = [0u8; 4];
self.read_data(&mut frame);
self.end_cmd();
if result != 0x01 {
return Err(InitError::CMDFailed(CMD::CMD8, result));
}
let mut index = 255;
while index != 0 {
/* <ACMD> */
self.send_cmd(CMD::CMD55, 0, 0);
let result = self.get_response();
self.end_cmd();
if result != 0x01 {
return Err(InitError::CMDFailed(CMD::CMD55, result));
}
/* Initiate SDC initialization process */
self.send_cmd(CMD::ACMD41, 0x40000000, 0);
let result = self.get_response();
self.end_cmd();
if result == 0x00 {
break;
}
index -= 1;
}
if index == 0 {
return Err(InitError::CMDFailed(CMD::ACMD41, result));
}
index = 255;
let mut frame = [0u8; 4];
while index != 0 {
/* Read OCR */
self.send_cmd(CMD::CMD58, 0, 1);
let result = self.get_response();
self.read_data(&mut frame);
self.end_cmd();
if result == 0 {
break;
}
index -= 1;
}
if index == 0 {
return Err(InitError::CMDFailed(CMD::CMD58, result));
}
if (frame[0] & 0x40) == 0 {
return Err(InitError::CardCapacityStatusNotSet(frame));
}
self.HIGH_SPEED_ENABLE();
self.get_cardinfo()
.map_err(|_| InitError::CannotGetCardInfo)
}
/*
* Reads a block of data from the SD.
* @param data_buf: slice that receives the data read from the SD.
* @param sector: SD's internal address to read from.
* @retval The SD Response:
* - `Err(())`: Sequence failed
* - `Ok(())`: Sequence succeed
*/
pub fn read_sector(&self, data_buf: &mut [u8], sector: u32) -> Result<(), ()> {
assert!(data_buf.len() >= SEC_LEN && (data_buf.len() % SEC_LEN) == 0);
/* Send CMD17 to read one block, or CMD18 for multiple */
let flag = if data_buf.len() == SEC_LEN {
self.send_cmd(CMD::CMD17, sector, 0);
false
} else {
self.send_cmd(CMD::CMD18, sector, 0);
true
};
/* Check if the SD acknowledged the read block command: R1 response (0x00: no errors) */
if self.get_response() != 0x00 {
self.end_cmd();
return Err(());
}
let mut error = false;
//let mut dma_chunk = [0u32; SEC_LEN];
let mut tmp_chunk = [0u8; SEC_LEN];
for chunk in data_buf.chunks_mut(SEC_LEN) {
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
error = true;
break;
}
/* Read the SD block data : read NumByteToRead data */
//self.read_data_dma(&mut dma_chunk);
self.read_data(&mut tmp_chunk);
/* Place the data received as u32 units from DMA into the u8 target buffer */
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/ tmp_chunk.iter()) {
//*a = (b & 0xff) as u8;
*a = *b;
}
/* Get CRC bytes (not really needed by us, but required by SD) */
let mut frame = [0u8; 2];
self.read_data(&mut frame);
}
self.end_cmd();
if flag {
self.send_cmd(CMD::CMD12, 0, 0);
self.get_response();
self.end_cmd();
self.end_cmd();
}
/* It is an error if not everything requested was read */
if error {
Err(())
} else {
Ok(())
}
}
/*
* Writes a block to the SD
* @param data_buf: slice containing the data to be written to the SD.
* @param sector: address to write on.
* @retval The SD Response:
* - `Err(())`: Sequence failed
* - `Ok(())`: Sequence succeed
*/
pub fn write_sector(&self, data_buf: &[u8], sector: u32) -> Result<(), ()> {
assert!(data_buf.len() >= SEC_LEN && (data_buf.len() % SEC_LEN) == 0);
let mut frame = [0xff, 0x00];
if data_buf.len() == SEC_LEN {
frame[1] = SD_START_DATA_SINGLE_BLOCK_WRITE;
self.send_cmd(CMD::CMD24, sector, 0);
} else {
frame[1] = SD_START_DATA_MULTIPLE_BLOCK_WRITE;
self.send_cmd(
CMD::ACMD23,
(data_buf.len() / SEC_LEN).try_into().unwrap(),
0,
);
self.get_response();
self.end_cmd();
self.send_cmd(CMD::CMD25, sector, 0);
}
/* Check if the SD acknowledged the write block command: R1 response (0x00: no errors) */
if self.get_response() != 0x00 {
self.end_cmd();
return Err(());
}
//let mut dma_chunk = [0u32; SEC_LEN];
let mut tmp_chunk = [0u8; SEC_LEN];
for chunk in data_buf.chunks(SEC_LEN) {
/* Send the data token to signify the start of the data */
self.write_data(&frame);
/* Write the block data to SD : write count data by block */
for (a, &b) in /*dma_chunk*/ tmp_chunk.iter_mut().zip(chunk.iter()) {
//*a = b.into();
*a = b;
}
//self.write_data_dma(&mut dma_chunk);
self.write_data(&tmp_chunk);
/* Put dummy CRC bytes */
self.write_data(&[0xff, 0xff]);
/* Read data response */
if self.get_dataresponse() != 0x00 {
self.end_cmd();
return Err(());
}
}
self.end_cmd();
self.end_cmd();
Ok(())
}
}
/** GPIOHS GPIO number to use for controlling the SD card CS pin */
const SD_CS_GPIONUM: u8 = 7;
/** CS value passed to the SPI controller; this is a dummy value as SPI0_CS3 is not mapped to anything
* in the FPIOA */
const SD_CS: u32 = 3;
/** Connect pins to internal functions */
fn io_init() {
fpioa::set_function(io::SPI0_SCLK, fpioa::function::SPI0_SCLK);
fpioa::set_function(io::SPI0_MOSI, fpioa::function::SPI0_D0);
fpioa::set_function(io::SPI0_MISO, fpioa::function::SPI0_D1);
fpioa::set_function(io::SPI0_CS0, fpioa::function::gpiohs(SD_CS_GPIONUM));
fpioa::set_io_pull(io::SPI0_CS0, fpioa::pull::DOWN); // GPIO output=pull down
}
lazy_static! {
static ref PERIPHERALS: UPIntrFreeCell<Peripherals> =
unsafe { UPIntrFreeCell::new(Peripherals::take().unwrap()) };
}
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
// wait for previous output to finish
usleep(100000);
let peripherals = unsafe { Peripherals::steal() };
sysctl::pll_set_freq(sysctl::pll::PLL0, 800_000_000).unwrap();
sysctl::pll_set_freq(sysctl::pll::PLL1, 300_000_000).unwrap();
sysctl::pll_set_freq(sysctl::pll::PLL2, 45_158_400).unwrap();
let clocks = k210_hal::clock::Clocks::new();
peripherals.UARTHS.configure(115_200.bps(), &clocks);
io_init();
let spi = peripherals.SPI0.constrain();
let sd = SDCard::new(spi, SD_CS, SD_CS_GPIONUM);
let info = sd.init().unwrap();
let num_sectors = info.CardCapacity / 512;
assert!(num_sectors > 0);
println!("init sdcard!");
sd
}
pub struct SDCardWrapper(UPIntrFreeCell<SDCard<SPIImpl<SPI0>>>);
impl SDCardWrapper {
pub fn new() -> Self {
unsafe { Self(UPIntrFreeCell::new(init_sdcard())) }
}
}
impl BlockDevice for SDCardWrapper {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0
.exclusive_access()
.read_sector(buf, block_id as u32)
.unwrap();
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0
.exclusive_access()
.write_sector(buf, block_id as u32)
.unwrap();
}
fn handle_irq(&self) {
unimplemented!();
}
}

@ -0,0 +1,131 @@
use super::BlockDevice;
use crate::mm::{
frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
StepByOne, VirtAddr,
};
use crate::sync::{Condvar, UPIntrFreeCell};
use crate::task::schedule;
use crate::DEV_NON_BLOCKING_ACCESS;
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{BlkResp, RespStatus, VirtIOBlk, VirtIOHeader};
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock {
virtio_blk: UPIntrFreeCell<VirtIOBlk<'static>>,
condvars: BTreeMap<u16, Condvar>,
}
lazy_static! {
static ref QUEUE_FRAMES: UPIntrFreeCell<Vec<FrameTracker>> =
unsafe { UPIntrFreeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb {
let mut resp = BlkResp::default();
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched()
});
schedule(task_cx_ptr);
assert_eq!(
resp.status(),
RespStatus::Ok,
"Error when reading VirtIOBlk"
);
} else {
self.virtio_blk
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
if nb {
let mut resp = BlkResp::default();
let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
self.condvars.get(&token).unwrap().wait_no_sched()
});
schedule(task_cx_ptr);
assert_eq!(
resp.status(),
RespStatus::Ok,
"Error when writing VirtIOBlk"
);
} else {
self.virtio_blk
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
fn handle_irq(&self) {
self.virtio_blk.exclusive_session(|blk| {
while let Ok(token) = blk.pop_used() {
self.condvars.get(&token).unwrap().signal();
}
});
}
}
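// Note (added for clarity): in the non-blocking path above, read_block_nb /
// write_block_nb return a descriptor token, the calling task parks itself on
// the per-token Condvar via wait_no_sched and yields through schedule(); when
// the device raises IRQ source 1, handle_irq pops the used tokens and signals
// the matching condvars so the parked tasks resume and check resp.status().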
impl VirtIOBlock {
pub fn new() -> Self {
let virtio_blk = unsafe {
UPIntrFreeCell::new(VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap())
};
let mut condvars = BTreeMap::new();
let channels = virtio_blk.exclusive_access().virt_queue_size();
for i in 0..channels {
let condvar = Condvar::new();
condvars.insert(i, condvar);
}
Self {
virtio_blk,
condvars,
}
}
}
#[no_mangle]
pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
let mut ppn_base = PhysPageNum(0);
for i in 0..pages {
let frame = frame_alloc().unwrap();
if i == 0 {
ppn_base = frame.ppn;
}
assert_eq!(frame.ppn.0, ppn_base.0 + i);
QUEUE_FRAMES.exclusive_access().push(frame);
}
ppn_base.into()
}
#[no_mangle]
pub extern "C" fn virtio_dma_dealloc(pa: PhysAddr, pages: usize) -> i32 {
let mut ppn_base: PhysPageNum = pa.into();
for _ in 0..pages {
frame_dealloc(ppn_base);
ppn_base.step();
}
0
}
#[no_mangle]
pub extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
VirtAddr(paddr.0)
}
#[no_mangle]
pub extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
PageTable::from_token(kernel_token())
.translate_va(vaddr)
.unwrap()
}

@ -0,0 +1,17 @@
mod ns16550a;
pub use ns16550a::NS16550a;
use crate::board::CharDeviceImpl;
use alloc::sync::Arc;
use lazy_static::*;
pub trait CharDevice {
fn read(&self) -> u8;
fn write(&self, ch: u8);
fn handle_irq(&self);
}
lazy_static! {
pub static ref UART: Arc<CharDeviceImpl> = Arc::new(CharDeviceImpl::new());
}

@ -0,0 +1,175 @@
//! Ref: https://www.lammertbies.nl/comm/info/serial-uart
//! Ref: ns16550a datasheet: https://datasheetspdf.com/pdf-file/605590/NationalSemiconductor/NS16550A/1
//! Ref: ns16450 datasheet: https://datasheetspdf.com/pdf-file/1311818/NationalSemiconductor/NS16450/1
use super::CharDevice;
use crate::sync::{Condvar, UPIntrFreeCell};
use crate::task::schedule;
use alloc::collections::VecDeque;
use bitflags::*;
use volatile::{ReadOnly, Volatile, WriteOnly};
bitflags! {
/// InterruptEnableRegister
pub struct IER: u8 {
const RX_AVAILABLE = 1 << 0;
const TX_EMPTY = 1 << 1;
}
/// LineStatusRegister
pub struct LSR: u8 {
const DATA_AVAILABLE = 1 << 0;
const THR_EMPTY = 1 << 5;
}
/// Modem Control Register
pub struct MCR: u8 {
const DATA_TERMINAL_READY = 1 << 0;
const REQUEST_TO_SEND = 1 << 1;
const AUX_OUTPUT1 = 1 << 2;
const AUX_OUTPUT2 = 1 << 3;
}
}
#[repr(C)]
#[allow(dead_code)]
struct ReadWithoutDLAB {
/// receiver buffer register
pub rbr: ReadOnly<u8>,
/// interrupt enable register
pub ier: Volatile<IER>,
/// interrupt identification register
pub iir: ReadOnly<u8>,
/// line control register
pub lcr: Volatile<u8>,
/// modem control register
pub mcr: Volatile<MCR>,
/// line status register
pub lsr: ReadOnly<LSR>,
/// ignore MSR
_padding1: ReadOnly<u8>,
/// ignore SCR
_padding2: ReadOnly<u8>,
}
#[repr(C)]
#[allow(dead_code)]
struct WriteWithoutDLAB {
/// transmitter holding register
pub thr: WriteOnly<u8>,
/// interrupt enable register
pub ier: Volatile<IER>,
/// ignore FCR
_padding0: ReadOnly<u8>,
/// line control register
pub lcr: Volatile<u8>,
/// modem control register
pub mcr: Volatile<MCR>,
/// line status register
pub lsr: ReadOnly<LSR>,
/// ignore other registers
_padding1: ReadOnly<u16>,
}
pub struct NS16550aRaw {
base_addr: usize,
}
impl NS16550aRaw {
fn read_end(&mut self) -> &mut ReadWithoutDLAB {
unsafe { &mut *(self.base_addr as *mut ReadWithoutDLAB) }
}
fn write_end(&mut self) -> &mut WriteWithoutDLAB {
unsafe { &mut *(self.base_addr as *mut WriteWithoutDLAB) }
}
pub fn new(base_addr: usize) -> Self {
Self { base_addr }
}
pub fn init(&mut self) {
let read_end = self.read_end();
let mut mcr = MCR::empty();
mcr |= MCR::DATA_TERMINAL_READY;
mcr |= MCR::REQUEST_TO_SEND;
mcr |= MCR::AUX_OUTPUT2;
read_end.mcr.write(mcr);
let ier = IER::RX_AVAILABLE;
read_end.ier.write(ier);
}
pub fn read(&mut self) -> Option<u8> {
let read_end = self.read_end();
let lsr = read_end.lsr.read();
if lsr.contains(LSR::DATA_AVAILABLE) {
Some(read_end.rbr.read())
} else {
None
}
}
pub fn write(&mut self, ch: u8) {
let write_end = self.write_end();
loop {
if write_end.lsr.read().contains(LSR::THR_EMPTY) {
write_end.thr.write(ch);
break;
}
}
}
}
struct NS16550aInner {
ns16550a: NS16550aRaw,
read_buffer: VecDeque<u8>,
}
pub struct NS16550a<const BASE_ADDR: usize> {
inner: UPIntrFreeCell<NS16550aInner>,
condvar: Condvar,
}
impl<const BASE_ADDR: usize> NS16550a<BASE_ADDR> {
pub fn new() -> Self {
let mut inner = NS16550aInner {
ns16550a: NS16550aRaw::new(BASE_ADDR),
read_buffer: VecDeque::new(),
};
inner.ns16550a.init();
Self {
inner: unsafe { UPIntrFreeCell::new(inner) },
condvar: Condvar::new(),
}
}
}
impl<const BASE_ADDR: usize> CharDevice for NS16550a<BASE_ADDR> {
fn read(&self) -> u8 {
loop {
let mut inner = self.inner.exclusive_access();
if let Some(ch) = inner.read_buffer.pop_front() {
return ch;
} else {
let task_cx_ptr = self.condvar.wait_no_sched();
drop(inner);
schedule(task_cx_ptr);
}
}
}
fn write(&self, ch: u8) {
let mut inner = self.inner.exclusive_access();
inner.ns16550a.write(ch);
}
fn handle_irq(&self) {
let mut count = 0;
self.inner.exclusive_session(|inner| {
while let Some(ch) = inner.ns16550a.read() {
count += 1;
inner.read_buffer.push_back(ch);
}
});
if count > 0 {
self.condvar.signal();
}
}
}
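// Note (added for clarity): read() is interrupt-driven rather than polling;
// if read_buffer is empty the task blocks on the condvar and yields, and
// handle_irq (invoked from the PLIC dispatch for source 10) drains the UART
// FIFO into read_buffer and signals the condvar. write() still busy-waits on
// LSR::THR_EMPTY.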

@ -0,0 +1,6 @@
pub mod block;
pub mod chardev;
pub mod plic;
pub use block::BLOCK_DEVICE;
pub use chardev::UART;

@ -0,0 +1,124 @@
#[allow(clippy::upper_case_acronyms)]
pub struct PLIC {
base_addr: usize,
}
#[derive(Copy, Clone)]
pub enum IntrTargetPriority {
Machine = 0,
Supervisor = 1,
}
impl IntrTargetPriority {
pub fn supported_number() -> usize {
2
}
}
impl PLIC {
fn priority_ptr(&self, intr_source_id: usize) -> *mut u32 {
assert!(intr_source_id > 0 && intr_source_id <= 132);
(self.base_addr + intr_source_id * 4) as *mut u32
}
fn hart_id_with_priority(hart_id: usize, target_priority: IntrTargetPriority) -> usize {
let priority_num = IntrTargetPriority::supported_number();
hart_id * priority_num + target_priority as usize
}
fn enable_ptr(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) -> (*mut u32, usize) {
let id = Self::hart_id_with_priority(hart_id, target_priority);
let (reg_id, reg_shift) = (intr_source_id / 32, intr_source_id % 32);
(
(self.base_addr + 0x2000 + 0x80 * id + 0x4 * reg_id) as *mut u32,
reg_shift,
)
}
fn threshold_ptr_of_hart_with_priority(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
) -> *mut u32 {
let id = Self::hart_id_with_priority(hart_id, target_priority);
(self.base_addr + 0x20_0000 + 0x1000 * id) as *mut u32
}
fn claim_comp_ptr_of_hart_with_priority(
&self,
hart_id: usize,
target_priority: IntrTargetPriority,
) -> *mut u32 {
let id = Self::hart_id_with_priority(hart_id, target_priority);
(self.base_addr + 0x20_0004 + 0x1000 * id) as *mut u32
}
pub unsafe fn new(base_addr: usize) -> Self {
Self { base_addr }
}
pub fn set_priority(&mut self, intr_source_id: usize, priority: u32) {
assert!(priority < 8);
unsafe {
self.priority_ptr(intr_source_id).write_volatile(priority);
}
}
#[allow(unused)]
pub fn get_priority(&mut self, intr_source_id: usize) -> u32 {
unsafe { self.priority_ptr(intr_source_id).read_volatile() & 7 }
}
pub fn enable(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) {
let (reg_ptr, shift) = self.enable_ptr(hart_id, target_priority, intr_source_id);
unsafe {
reg_ptr.write_volatile(reg_ptr.read_volatile() | 1 << shift);
}
}
#[allow(unused)]
pub fn disable(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
intr_source_id: usize,
) {
let (reg_ptr, shift) = self.enable_ptr(hart_id, target_priority, intr_source_id);
unsafe {
reg_ptr.write_volatile(reg_ptr.read_volatile() & (!(1u32 << shift)));
}
}
pub fn set_threshold(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
threshold: u32,
) {
assert!(threshold < 8);
let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe {
threshold_ptr.write_volatile(threshold);
}
}
#[allow(unused)]
pub fn get_threshold(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
let threshold_ptr = self.threshold_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe { threshold_ptr.read_volatile() & 7 }
}
pub fn claim(&mut self, hart_id: usize, target_priority: IntrTargetPriority) -> u32 {
let claim_comp_ptr = self.claim_comp_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe { claim_comp_ptr.read_volatile() }
}
pub fn complete(
&mut self,
hart_id: usize,
target_priority: IntrTargetPriority,
completion: u32,
) {
let claim_comp_ptr = self.claim_comp_ptr_of_hart_with_priority(hart_id, target_priority);
unsafe {
claim_comp_ptr.write_volatile(completion);
}
}
}
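// Worked example of the address arithmetic above (illustrative), for hart 0 at
// Supervisor priority (id = 0 * 2 + 1 = 1) with the qemu VIRT_PLIC base
// 0xC00_0000: enable_ptr for source 10 is base + 0x2000 + 0x80 * 1 =
// 0xC00_2080 (bit 10), the threshold register is base + 0x20_0000 + 0x1000 * 1
// = 0xC20_1000, and claim/complete is base + 0x20_0004 + 0x1000 * 1 = 0xC20_1004.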

@ -9,4 +9,4 @@ _start:
boot_stack:
.space 4096 * 16
.globl boot_stack_top
boot_stack_top:

@ -0,0 +1,139 @@
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPIntrFreeCell;
use alloc::sync::Arc;
use alloc::vec::Vec;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
pub struct OSInode {
readable: bool,
writable: bool,
inner: UPIntrFreeCell<OSInodeInner>,
}
pub struct OSInodeInner {
offset: usize,
inode: Arc<Inode>,
}
impl OSInode {
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
Self {
readable,
writable,
inner: unsafe { UPIntrFreeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
pub fn read_all(&self) -> Vec<u8> {
let mut inner = self.inner.exclusive_access();
let mut buffer = [0u8; 512];
let mut v: Vec<u8> = Vec::new();
loop {
let len = inner.inode.read_at(inner.offset, &mut buffer);
if len == 0 {
break;
}
inner.offset += len;
v.extend_from_slice(&buffer[..len]);
}
v
}
}
lazy_static! {
pub static ref ROOT_INODE: Arc<Inode> = {
let efs = EasyFileSystem::open(BLOCK_DEVICE.clone());
Arc::new(EasyFileSystem::root_inode(&efs))
};
}
pub fn list_apps() {
println!("/**** APPS ****");
for app in ROOT_INODE.ls() {
println!("{}", app);
}
println!("**************/")
}
bitflags! {
pub struct OpenFlags: u32 {
const RDONLY = 0;
const WRONLY = 1 << 0;
const RDWR = 1 << 1;
const CREATE = 1 << 9;
const TRUNC = 1 << 10;
}
}
impl OpenFlags {
/// Do not check validity for simplicity
/// Return (readable, writable)
pub fn read_write(&self) -> (bool, bool) {
if self.is_empty() {
(true, false)
} else if self.contains(Self::WRONLY) {
(false, true)
} else {
(true, true)
}
}
}
pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
let (readable, writable) = flags.read_write();
if flags.contains(OpenFlags::CREATE) {
if let Some(inode) = ROOT_INODE.find(name) {
// clear size
inode.clear();
Some(Arc::new(OSInode::new(readable, writable, inode)))
} else {
// create file
ROOT_INODE
.create(name)
.map(|inode| Arc::new(OSInode::new(readable, writable, inode)))
}
} else {
ROOT_INODE.find(name).map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(readable, writable, inode))
})
}
}
impl File for OSInode {
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, mut buf: UserBuffer) -> usize {
let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize;
for slice in buf.buffers.iter_mut() {
let read_size = inner.inode.read_at(inner.offset, *slice);
if read_size == 0 {
break;
}
inner.offset += read_size;
total_read_size += read_size;
}
total_read_size
}
fn write(&self, buf: UserBuffer) -> usize {
let mut inner = self.inner.exclusive_access();
let mut total_write_size = 0usize;
for slice in buf.buffers.iter() {
let write_size = inner.inode.write_at(inner.offset, *slice);
assert_eq!(write_size, slice.len());
inner.offset += write_size;
total_write_size += write_size;
}
total_write_size
}
}

@ -1,21 +1,16 @@
mod inode;
mod pipe;
mod stdio;
use crate::mm::UserBuffer;
use core::any::Any;
pub trait File : Any + Send + Sync {
pub trait File: Send + Sync {
fn readable(&self) -> bool;
fn writable(&self) -> bool;
fn read(&self, buf: UserBuffer) -> usize;
fn write(&self, buf: UserBuffer) -> usize;
fn as_any_ref(&self) -> &dyn Any;
}
impl dyn File {
#[allow(unused)]
pub fn downcast_ref<T: File>(&self) -> Option<&T> {
self.as_any_ref().downcast_ref::<T>()
}
}
pub use pipe::{Pipe, make_pipe};
pub use stdio::{Stdin, Stdout};
pub use inode::{list_apps, open_file, OSInode, OpenFlags};
pub use pipe::{make_pipe, Pipe};
pub use stdio::{Stdin, Stdout};

@ -1,27 +1,25 @@
use super::File;
use crate::mm::UserBuffer;
use crate::sync::UPIntrFreeCell;
use alloc::sync::{Arc, Weak};
use spin::Mutex;
use crate::mm::{
UserBuffer,
};
use crate::task::suspend_current_and_run_next;
use core::any::Any;
pub struct Pipe {
readable: bool,
writable: bool,
buffer: Arc<Mutex<PipeRingBuffer>>,
buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>,
}
impl Pipe {
pub fn read_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self {
pub fn read_end_with_buffer(buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>) -> Self {
Self {
readable: true,
writable: false,
buffer,
}
}
pub fn write_end_with_buffer(buffer: Arc<Mutex<PipeRingBuffer>>) -> Self {
pub fn write_end_with_buffer(buffer: Arc<UPIntrFreeCell<PipeRingBuffer>>) -> Self {
Self {
readable: false,
writable: true,
@ -34,9 +32,9 @@ const RING_BUFFER_SIZE: usize = 32;
#[derive(Copy, Clone, PartialEq)]
enum RingBufferStatus {
FULL,
EMPTY,
NORMAL,
Full,
Empty,
Normal,
}
pub struct PipeRingBuffer {
@ -53,7 +51,7 @@ impl PipeRingBuffer {
arr: [0; RING_BUFFER_SIZE],
head: 0,
tail: 0,
status: RingBufferStatus::EMPTY,
status: RingBufferStatus::Empty,
write_end: None,
}
}
@ -61,35 +59,33 @@ impl PipeRingBuffer {
self.write_end = Some(Arc::downgrade(write_end));
}
pub fn write_byte(&mut self, byte: u8) {
self.status = RingBufferStatus::NORMAL;
self.status = RingBufferStatus::Normal;
self.arr[self.tail] = byte;
self.tail = (self.tail + 1) % RING_BUFFER_SIZE;
if self.tail == self.head {
self.status = RingBufferStatus::FULL;
self.status = RingBufferStatus::Full;
}
}
pub fn read_byte(&mut self) -> u8 {
self.status = RingBufferStatus::NORMAL;
self.status = RingBufferStatus::Normal;
let c = self.arr[self.head];
self.head = (self.head + 1) % RING_BUFFER_SIZE;
if self.head == self.tail {
self.status = RingBufferStatus::EMPTY;
self.status = RingBufferStatus::Empty;
}
c
}
pub fn available_read(&self) -> usize {
if self.status == RingBufferStatus::EMPTY {
if self.status == RingBufferStatus::Empty {
0
} else if self.tail > self.head {
self.tail - self.head
} else {
if self.tail > self.head {
self.tail - self.head
} else {
self.tail + RING_BUFFER_SIZE - self.head
}
self.tail + RING_BUFFER_SIZE - self.head
}
}
pub fn available_write(&self) -> usize {
if self.status == RingBufferStatus::FULL {
if self.status == RingBufferStatus::Full {
0
} else {
RING_BUFFER_SIZE - self.available_read()
@ -102,24 +98,26 @@ impl PipeRingBuffer {
/// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(Mutex::new(PipeRingBuffer::new()));
let read_end = Arc::new(
Pipe::read_end_with_buffer(buffer.clone())
);
let write_end = Arc::new(
Pipe::write_end_with_buffer(buffer.clone())
);
buffer.lock().set_write_end(&write_end);
let buffer = Arc::new(unsafe { UPIntrFreeCell::new(PipeRingBuffer::new()) });
let read_end = Arc::new(Pipe::read_end_with_buffer(buffer.clone()));
let write_end = Arc::new(Pipe::write_end_with_buffer(buffer.clone()));
buffer.exclusive_access().set_write_end(&write_end);
(read_end, write_end)
}
impl File for Pipe {
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, buf: UserBuffer) -> usize {
assert_eq!(self.readable, true);
assert!(self.readable());
let mut buf_iter = buf.into_iter();
let mut read_size = 0usize;
loop {
let mut ring_buffer = self.buffer.lock();
let mut ring_buffer = self.buffer.exclusive_access();
let loop_read = ring_buffer.available_read();
if loop_read == 0 {
if ring_buffer.all_write_ends_closed() {
@ -132,7 +130,9 @@ impl File for Pipe {
// read at most loop_read bytes
for _ in 0..loop_read {
if let Some(byte_ref) = buf_iter.next() {
unsafe { *byte_ref = ring_buffer.read_byte(); }
unsafe {
*byte_ref = ring_buffer.read_byte();
}
read_size += 1;
} else {
return read_size;
@ -141,11 +141,11 @@ impl File for Pipe {
}
}
fn write(&self, buf: UserBuffer) -> usize {
assert_eq!(self.writable, true);
assert!(self.writable());
let mut buf_iter = buf.into_iter();
let mut write_size = 0usize;
loop {
let mut ring_buffer = self.buffer.lock();
let mut ring_buffer = self.buffer.exclusive_access();
let loop_write = ring_buffer.available_write();
if loop_write == 0 {
drop(ring_buffer);
@ -163,5 +163,4 @@ impl File for Pipe {
}
}
}
fn as_any_ref(&self) -> &dyn Any { self }
}
}

@ -1,39 +1,39 @@
use super::File;
use crate::mm::{UserBuffer};
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
use core::any::Any;
use crate::drivers::chardev::{CharDevice, UART};
use crate::mm::UserBuffer;
pub struct Stdin;
pub struct Stdout;
impl File for Stdin {
fn readable(&self) -> bool {
true
}
fn writable(&self) -> bool {
false
}
fn read(&self, mut user_buf: UserBuffer) -> usize {
assert_eq!(user_buf.len(), 1);
// busy loop
let mut c: usize;
loop {
c = console_getchar();
if c == 0 {
suspend_current_and_run_next();
continue;
} else {
break;
}
//println!("before UART.read() in Stdin::read()");
let ch = UART.read();
unsafe {
user_buf.buffers[0].as_mut_ptr().write_volatile(ch);
}
let ch = c as u8;
unsafe { user_buf.buffers[0].as_mut_ptr().write_volatile(ch); }
1
}
fn write(&self, _user_buf: UserBuffer) -> usize {
panic!("Cannot write to stdin!");
}
fn as_any_ref(&self) -> &dyn Any { self }
}
impl File for Stdout {
fn read(&self, _user_buf: UserBuffer) -> usize{
fn readable(&self) -> bool {
false
}
fn writable(&self) -> bool {
true
}
fn read(&self, _user_buf: UserBuffer) -> usize {
panic!("Cannot read from stdout!");
}
fn write(&self, user_buf: UserBuffer) -> usize {
@ -42,5 +42,4 @@ impl File for Stdout {
}
user_buf.len()
}
fn as_any_ref(&self) -> &dyn Any { self }
}
}

@ -1,12 +1,37 @@
use core::panic::PanicInfo;
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
use core::arch::asm;
use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
if let Some(location) = info.location() {
println!("[kernel] Panicked at {}:{} {}", location.file(), location.line(), info.message().unwrap());
println!(
"[kernel] Panicked at {}:{} {}",
location.file(),
location.line(),
info.message().unwrap()
);
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
shutdown()
unsafe {
backtrace();
}
shutdown(255)
}
unsafe fn backtrace() {
let mut fp: usize;
let stop = current_kstack_top();
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop {
break;
}
println!("#{}:ra={:#x}", i, *((fp - 8) as *const usize));
fp = *((fp - 16) as *const usize);
}
println!("---END BACKTRACE---");
}
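// Note (added for clarity): this walk assumes the standard RISC-V frame layout
// with frame pointers enabled, where the saved ra sits at fp - 8 and the
// caller's fp at fp - 16; it stops after 10 frames or when fp reaches the top
// of the current kernel stack returned by current_kstack_top().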

@ -22,6 +22,7 @@ SECTIONS
srodata = .;
.rodata : {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
}
. = ALIGN(4K);
@ -29,6 +30,7 @@ SECTIONS
sdata = .;
.data : {
*(.data .data.*)
*(.sdata .sdata.*)
}
. = ALIGN(4K);
@ -38,6 +40,7 @@ SECTIONS
*(.bss.stack)
sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
}
. = ALIGN(4K);

@ -0,0 +1,53 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0x80200000;
SECTIONS
{
. = BASE_ADDRESS;
skernel = .;
stext = .;
.text : {
*(.text.entry)
. = ALIGN(4K);
strampoline = .;
*(.text.trampoline);
. = ALIGN(4K);
*(.text .text.*)
}
. = ALIGN(4K);
etext = .;
srodata = .;
.rodata : {
*(.rodata .rodata.*)
*(.srodata .srodata.*)
}
. = ALIGN(4K);
erodata = .;
sdata = .;
.data : {
*(.data .data.*)
*(.sdata .sdata.*)
}
. = ALIGN(4K);
edata = .;
sbss_with_stack = .;
.bss : {
*(.bss.stack)
sbss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
}
. = ALIGN(4K);
ebss = .;
ekernel = .;
/DISCARD/ : {
*(.eh_frame)
}
}

@ -1,62 +0,0 @@
use alloc::vec::Vec;
pub fn get_num_app() -> usize {
extern "C" { fn _num_app(); }
unsafe { (_num_app as usize as *const usize).read_volatile() }
}
pub fn get_app_data(app_id: usize) -> &'static [u8] {
extern "C" { fn _num_app(); }
let num_app_ptr = _num_app as usize as *const usize;
let num_app = get_num_app();
let app_start = unsafe {
core::slice::from_raw_parts(num_app_ptr.add(1), num_app + 1)
};
assert!(app_id < num_app);
unsafe {
core::slice::from_raw_parts(
app_start[app_id] as *const u8,
app_start[app_id + 1] - app_start[app_id]
)
}
}
#[allow(unused)]
pub fn get_app_data_by_name(name: &str) -> Option<&'static [u8]> {
let num_app = get_num_app();
let app_names = app_names();
(0..num_app)
.find(|&i| app_names[i] == name)
.map(|i| get_app_data(i))
}
#[allow(unused)]
fn app_names() -> Vec<&'static str> {
let num_app = get_num_app();
extern "C" { fn _app_names(); }
let mut start = _app_names as usize as *const u8;
let mut v = Vec::new();
unsafe {
for _ in 0..num_app {
let mut end = start;
while end.read_volatile() != '\n' as u8 {
end = end.add(1);
}
let slice = core::slice::from_raw_parts(start, end as usize - start as usize);
let str = core::str::from_utf8(slice).unwrap();
v.push(str);
// Note that there is an extra char between names
start = end.add(2);
}
}
v
}
pub fn list_apps() {
let apps = app_names();
println!("/**** APPS ****");
for app in apps {
println!("{}", app);
}
println!("**************/")
}

@ -1,9 +1,6 @@
#![no_std]
#![no_main]
#![feature(global_asm)]
#![feature(llvm_asm)]
#![feature(panic_info_message)]
#![feature(const_in_array_repeat_expressions)]
#![feature(alloc_error_handler)]
extern crate alloc;
@ -11,44 +8,59 @@ extern crate alloc;
#[macro_use]
extern crate bitflags;
#[cfg(feature = "board_k210")]
#[path = "boards/k210.rs"]
mod board;
#[cfg(not(any(feature = "board_k210")))]
#[path = "boards/qemu.rs"]
mod board;
#[macro_use]
mod console;
mod config;
mod drivers;
mod fs;
mod lang_items;
mod mm;
mod sbi;
mod sync;
mod syscall;
mod trap;
mod loader;
mod config;
mod task;
mod timer;
mod mm;
mod fs;
mod trap;
global_asm!(include_str!("entry.asm"));
global_asm!(include_str!("link_app.S"));
core::arch::global_asm!(include_str!("entry.asm"));
fn clear_bss() {
extern "C" {
fn sbss();
fn ebss();
}
(sbss as usize..ebss as usize).for_each(|a| {
unsafe { (a as *mut u8).write_volatile(0) }
});
unsafe {
core::slice::from_raw_parts_mut(sbss as usize as *mut u8, ebss as usize - sbss as usize)
.fill(0);
}
}
use lazy_static::*;
use sync::UPIntrFreeCell;
lazy_static! {
pub static ref DEV_NON_BLOCKING_ACCESS: UPIntrFreeCell<bool> =
unsafe { UPIntrFreeCell::new(false) };
}
#[no_mangle]
pub fn rust_main() -> ! {
clear_bss();
println!("[kernel] Hello, world!");
mm::init();
mm::remap_test();
task::add_initproc();
println!("after initproc!");
trap::init();
trap::enable_timer_interrupt();
timer::set_next_trigger();
loader::list_apps();
board::device_init();
fs::list_apps();
task::add_initproc();
*DEV_NON_BLOCKING_ACCESS.exclusive_access() = true;
task::run_tasks();
panic!("Unreachable in rust_main!");
}
}

@ -1,17 +1,26 @@
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
const PA_WIDTH_SV39: usize = 56;
const VA_WIDTH_SV39: usize = 39;
const PPN_WIDTH_SV39: usize = PA_WIDTH_SV39 - PAGE_SIZE_BITS;
const VPN_WIDTH_SV39: usize = VA_WIDTH_SV39 - PAGE_SIZE_BITS;
/// Definitions
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct PhysAddr(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct VirtAddr(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct PhysPageNum(pub usize);
#[repr(C)]
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct VirtPageNum(pub usize);
@ -43,35 +52,63 @@ impl Debug for PhysPageNum {
/// usize -> T: usize.into()
impl From<usize> for PhysAddr {
fn from(v: usize) -> Self { Self(v) }
fn from(v: usize) -> Self {
Self(v & ((1 << PA_WIDTH_SV39) - 1))
}
}
impl From<usize> for PhysPageNum {
fn from(v: usize) -> Self { Self(v) }
fn from(v: usize) -> Self {
Self(v & ((1 << PPN_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtAddr {
fn from(v: usize) -> Self { Self(v) }
fn from(v: usize) -> Self {
Self(v & ((1 << VA_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtPageNum {
fn from(v: usize) -> Self { Self(v) }
fn from(v: usize) -> Self {
Self(v & ((1 << VPN_WIDTH_SV39) - 1))
}
}
impl From<PhysAddr> for usize {
fn from(v: PhysAddr) -> Self { v.0 }
fn from(v: PhysAddr) -> Self {
v.0
}
}
impl From<PhysPageNum> for usize {
fn from(v: PhysPageNum) -> Self { v.0 }
fn from(v: PhysPageNum) -> Self {
v.0
}
}
impl From<VirtAddr> for usize {
fn from(v: VirtAddr) -> Self { v.0 }
fn from(v: VirtAddr) -> Self {
if v.0 >= (1 << (VA_WIDTH_SV39 - 1)) {
v.0 | (!((1 << VA_WIDTH_SV39) - 1))
} else {
v.0
}
}
}
impl From<VirtPageNum> for usize {
fn from(v: VirtPageNum) -> Self { v.0 }
fn from(v: VirtPageNum) -> Self {
v.0
}
}
impl VirtAddr {
pub fn floor(&self) -> VirtPageNum { VirtPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> VirtPageNum { VirtPageNum((self.0 + PAGE_SIZE - 1) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<VirtAddr> for VirtPageNum {
fn from(v: VirtAddr) -> Self {
@ -80,13 +117,23 @@ impl From<VirtAddr> for VirtPageNum {
}
}
impl From<VirtPageNum> for VirtAddr {
fn from(v: VirtPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: VirtPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl PhysAddr {
pub fn floor(&self) -> PhysPageNum { PhysPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> PhysPageNum { PhysPageNum((self.0 + PAGE_SIZE - 1) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<PhysAddr> for PhysPageNum {
fn from(v: PhysAddr) -> Self {
@ -95,7 +142,9 @@ impl From<PhysAddr> for PhysPageNum {
}
}
impl From<PhysPageNum> for PhysAddr {
fn from(v: PhysPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: PhysPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl VirtPageNum {
@ -111,27 +160,24 @@ impl VirtPageNum {
}
impl PhysAddr {
pub fn get_ref<T>(&self) -> &'static T {
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe {
(self.0 as *mut T).as_mut().unwrap()
}
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512)
}
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096)
}
let pa: PhysAddr = (*self).into();
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = self.clone().into();
let pa: PhysAddr = (*self).into();
pa.get_mut()
}
}
@ -144,43 +190,64 @@ impl StepByOne for VirtPageNum {
self.0 += 1;
}
}
impl StepByOne for PhysPageNum {
fn step(&mut self) {
self.0 += 1;
}
}
#[derive(Copy, Clone)]
pub struct SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
l: T,
r: T,
}
impl<T> SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(start: T, end: T) -> Self {
assert!(start <= end, "start {:?} > end {:?}!", start, end);
Self { l: start, r: end }
}
pub fn get_start(&self) -> T { self.l }
pub fn get_end(&self) -> T { self.r }
pub fn get_start(&self) -> T {
self.l
}
pub fn get_end(&self) -> T {
self.r
}
}
impl<T> IntoIterator for SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> IntoIterator for SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
type IntoIter = SimpleRangeIterator<T>;
fn into_iter(self) -> Self::IntoIter {
SimpleRangeIterator::new(self.l, self.r)
}
}
pub struct SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
current: T,
end: T,
}
impl<T> SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(l: T, r: T) -> Self {
Self { current: l, end: r, }
Self { current: l, end: r }
}
}
impl<T> Iterator for SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> Iterator for SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if self.current == self.end {
@ -192,4 +259,4 @@ impl<T> Iterator for SimpleRangeIterator<T> where
}
}
}
pub type VPNRange = SimpleRange<VirtPageNum>;
pub type VPNRange = SimpleRange<VirtPageNum>;
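A worked example of the conversions above, assuming PAGE_SIZE = 4096 and PAGE_SIZE_BITS = 12 as configured elsewhere in the tutorial:
let va = VirtAddr::from(0x8020_1234usize);
assert_eq!(va.floor().0, 0x80201);     // round down to the containing page
assert_eq!(va.ceil().0, 0x80202);      // round up to the next page boundary
assert_eq!(va.page_offset(), 0x234);   // low 12 bits
// usize -> VirtAddr keeps only the low 39 bits, while the reverse conversion
// sign-extends bit 38, so canonical SV39 high-half addresses round-trip correctly.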

@ -1,9 +1,9 @@
use super::{PhysAddr, PhysPageNum};
use alloc::vec::Vec;
use spin::Mutex;
use crate::config::MEMORY_END;
use lazy_static::*;
use crate::sync::UPIntrFreeCell;
use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
pub struct FrameTracker {
pub ppn: PhysPageNum,
@ -62,22 +62,17 @@ impl FrameAllocator for StackFrameAllocator {
fn alloc(&mut self) -> Option<PhysPageNum> {
if let Some(ppn) = self.recycled.pop() {
Some(ppn.into())
} else if self.current == self.end {
None
} else {
if self.current == self.end {
None
} else {
self.current += 1;
Some((self.current - 1).into())
}
self.current += 1;
Some((self.current - 1).into())
}
}
fn dealloc(&mut self, ppn: PhysPageNum) {
let ppn = ppn.0;
// validity check
if ppn >= self.current || self.recycled
.iter()
.find(|&v| {*v == ppn})
.is_some() {
if ppn >= self.current || self.recycled.iter().any(|&v| v == ppn) {
panic!("Frame ppn={:#x} has not been allocated!", ppn);
}
// recycle
@ -88,30 +83,29 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: Mutex<FrameAllocatorImpl> =
Mutex::new(FrameAllocatorImpl::new());
pub static ref FRAME_ALLOCATOR: UPIntrFreeCell<FrameAllocatorImpl> =
unsafe { UPIntrFreeCell::new(FrameAllocatorImpl::new()) };
}
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
}
FRAME_ALLOCATOR
.lock()
.init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
FRAME_ALLOCATOR.exclusive_access().init(
PhysAddr::from(ekernel as usize).ceil(),
PhysAddr::from(MEMORY_END).floor(),
);
}
pub fn frame_alloc() -> Option<FrameTracker> {
FRAME_ALLOCATOR
.lock()
.exclusive_access()
.alloc()
.map(|ppn| FrameTracker::new(ppn))
.map(FrameTracker::new)
}
fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR
.lock()
.dealloc(ppn);
pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]
@ -130,4 +124,4 @@ pub fn frame_allocator_test() {
}
drop(v);
println!("frame_allocator_test passed!");
}
}
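A minimal usage sketch of the interface above; it assumes, as in the rest of the tutorial, that FrameTracker's Drop impl hands the frame back via frame_dealloc:
{
    let frame = frame_alloc().expect("no free physical frames");
    println!("allocated ppn = {:#x}", frame.ppn.0);
    frame.ppn.get_bytes_array().fill(0); // the page is ours while `frame` is alive
} // FrameTracker dropped here, so the ppn is recycled into FRAME_ALLOCATOR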

@ -1,5 +1,5 @@
use buddy_system_allocator::LockedHeap;
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
@ -36,8 +36,8 @@ pub fn heap_test() {
for i in 0..500 {
v.push(i);
}
for i in 0..500 {
assert_eq!(v[i], i);
for (i, val) in v.iter().take(500).enumerate() {
assert_eq!(*val, i);
}
assert!(bss_range.contains(&(v.as_ptr() as usize)));
drop(v);

@ -1,20 +1,15 @@
use super::{PageTable, PageTableEntry, PTEFlags};
use super::{VirtPageNum, VirtAddr, PhysPageNum, PhysAddr};
use super::{FrameTracker, frame_alloc};
use super::{VPNRange, StepByOne};
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE};
use crate::sync::UPIntrFreeCell;
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use riscv::register::satp;
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::arch::asm;
use lazy_static::*;
use spin::Mutex;
use crate::config::{
MEMORY_END,
PAGE_SIZE,
TRAMPOLINE,
TRAP_CONTEXT,
USER_STACK_SIZE
};
use riscv::register::satp;
extern "C" {
fn stext();
@ -30,9 +25,12 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<Mutex<MemorySet>> = Arc::new(Mutex::new(
MemorySet::new_kernel()
));
pub static ref KERNEL_SPACE: Arc<UPIntrFreeCell<MemorySet>> =
Arc::new(unsafe { UPIntrFreeCell::new(MemorySet::new_kernel()) });
}
pub fn kernel_token() -> usize {
KERNEL_SPACE.exclusive_access().token()
}
pub struct MemorySet {
@ -51,17 +49,24 @@ impl MemorySet {
self.page_table.token()
}
/// Assume that no conflicts.
pub fn insert_framed_area(&mut self, start_va: VirtAddr, end_va: VirtAddr, permission: MapPermission) {
self.push(MapArea::new(
start_va,
end_va,
MapType::Framed,
permission,
), None);
pub fn insert_framed_area(
&mut self,
start_va: VirtAddr,
end_va: VirtAddr,
permission: MapPermission,
) {
self.push(
MapArea::new(start_va, end_va, MapType::Framed, permission),
None,
);
}
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self.areas.iter_mut().enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
if let Some((idx, area)) = self
.areas
.iter_mut()
.enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn)
{
area.unmap(&mut self.page_table);
self.areas.remove(idx);
}
@ -90,46 +95,76 @@ impl MemorySet {
println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
println!(".bss [{:#x}, {:#x})", sbss_with_stack as usize, ebss as usize);
println!(
".bss [{:#x}, {:#x})",
sbss_with_stack as usize, ebss as usize
);
println!("mapping .text section");
memory_set.push(MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
), None);
memory_set.push(
MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
),
None,
);
println!("mapping .rodata section");
memory_set.push(MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
), None);
memory_set.push(
MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
),
None,
);
println!("mapping .data section");
memory_set.push(MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
memory_set.push(
MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping .bss section");
memory_set.push(MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
memory_set.push(
MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping physical memory");
memory_set.push(MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
memory_set.push(
MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(
MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
}
memory_set
}
/// Include sections in elf and trampoline and TrapContext and user stack,
/// also returns user_sp and entry point.
/// Include sections in elf and trampoline,
/// also returns user_sp_base and entry point.
pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
let mut memory_set = Self::new_bare();
// map trampoline
@ -148,42 +183,31 @@ impl MemorySet {
let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into();
let mut map_perm = MapPermission::U;
let ph_flags = ph.flags();
if ph_flags.is_read() { map_perm |= MapPermission::R; }
if ph_flags.is_write() { map_perm |= MapPermission::W; }
if ph_flags.is_execute() { map_perm |= MapPermission::X; }
let map_area = MapArea::new(
start_va,
end_va,
MapType::Framed,
map_perm,
);
if ph_flags.is_read() {
map_perm |= MapPermission::R;
}
if ph_flags.is_write() {
map_perm |= MapPermission::W;
}
if ph_flags.is_execute() {
map_perm |= MapPermission::X;
}
let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm);
max_end_vpn = map_area.vpn_range.get_end();
memory_set.push(
map_area,
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize])
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]),
);
}
}
// map user stack with U flags
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_bottom: usize = max_end_va.into();
// guard page
user_stack_bottom += PAGE_SIZE;
let user_stack_top = user_stack_bottom + USER_STACK_SIZE;
memory_set.push(MapArea::new(
user_stack_bottom.into(),
user_stack_top.into(),
MapType::Framed,
MapPermission::R | MapPermission::W | MapPermission::U,
), None);
// map TrapContext
memory_set.push(MapArea::new(
TRAP_CONTEXT.into(),
TRAMPOLINE.into(),
MapType::Framed,
MapPermission::R | MapPermission::W,
), None);
(memory_set, user_stack_top, elf.header.pt2.entry_point() as usize)
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
(
memory_set,
user_stack_base,
elf.header.pt2.entry_point() as usize,
)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
@ -197,7 +221,9 @@ impl MemorySet {
for vpn in area.vpn_range {
let src_ppn = user_space.translate(vpn).unwrap().ppn();
let dst_ppn = memory_set.translate(vpn).unwrap().ppn();
dst_ppn.get_bytes_array().copy_from_slice(src_ppn.get_bytes_array());
dst_ppn
.get_bytes_array()
.copy_from_slice(src_ppn.get_bytes_array());
}
}
memory_set
@ -206,7 +232,7 @@ impl MemorySet {
let satp = self.page_table.token();
unsafe {
satp::write(satp);
llvm_asm!("sfence.vma" :::: "volatile");
asm!("sfence.vma");
}
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
@ -230,7 +256,7 @@ impl MapArea {
start_va: VirtAddr,
end_va: VirtAddr,
map_type: MapType,
map_perm: MapPermission
map_perm: MapPermission,
) -> Self {
let start_vpn: VirtPageNum = start_va.floor();
let end_vpn: VirtPageNum = end_va.ceil();
@ -265,11 +291,8 @@ impl MapArea {
page_table.map(vpn, ppn, pte_flags);
}
pub fn unmap_one(&mut self, page_table: &mut PageTable, vpn: VirtPageNum) {
match self.map_type {
MapType::Framed => {
self.data_frames.remove(&vpn);
}
_ => {}
if self.map_type == MapType::Framed {
self.data_frames.remove(&vpn);
}
page_table.unmap(vpn);
}
@ -324,21 +347,24 @@ bitflags! {
#[allow(unused)]
pub fn remap_test() {
let mut kernel_space = KERNEL_SPACE.lock();
let mut kernel_space = KERNEL_SPACE.exclusive_access();
let mid_text: VirtAddr = ((stext as usize + etext as usize) / 2).into();
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
assert_eq!(
kernel_space.page_table.translate(mid_text.floor()).unwrap().writable(),
false
);
assert_eq!(
kernel_space.page_table.translate(mid_rodata.floor()).unwrap().writable(),
false,
);
assert_eq!(
kernel_space.page_table.translate(mid_data.floor()).unwrap().executable(),
false,
);
assert!(!kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),);
assert!(!kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),);
assert!(!kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),);
println!("remap_test passed!");
}
}

@ -1,26 +1,22 @@
mod heap_allocator;
mod address;
mod frame_allocator;
mod page_table;
mod heap_allocator;
mod memory_set;
mod page_table;
use page_table::{PageTable, PTEFlags};
use address::{VPNRange, StepByOne};
pub use address::{PhysAddr, VirtAddr, PhysPageNum, VirtPageNum};
pub use frame_allocator::{FrameTracker, frame_alloc};
use address::VPNRange;
pub use address::{PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
pub use frame_allocator::{frame_alloc, frame_dealloc, FrameTracker};
pub use memory_set::remap_test;
pub use memory_set::{kernel_token, MapPermission, MemorySet, KERNEL_SPACE};
use page_table::PTEFlags;
pub use page_table::{
PageTableEntry,
translated_byte_buffer,
translated_str,
translated_refmut,
UserBuffer,
UserBufferIterator,
translated_byte_buffer, translated_ref, translated_refmut, translated_str, PageTable,
PageTableEntry, UserBuffer, UserBufferIterator,
};
pub use memory_set::{MemorySet, KERNEL_SPACE, MapPermission};
pub use memory_set::remap_test;
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();
KERNEL_SPACE.clone().lock().activate();
KERNEL_SPACE.exclusive_access().activate();
}

@ -1,15 +1,7 @@
use super::{
frame_alloc,
PhysPageNum,
FrameTracker,
VirtPageNum,
VirtAddr,
PhysAddr,
StepByOne
};
use alloc::vec::Vec;
use alloc::vec;
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use bitflags::*;
bitflags! {
@ -38,9 +30,7 @@ impl PageTableEntry {
}
}
pub fn empty() -> Self {
PageTableEntry {
bits: 0,
}
PageTableEntry { bits: 0 }
}
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
@ -87,8 +77,8 @@ impl PageTable {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
let mut result: Option<&mut PageTableEntry> = None;
for i in 0..3 {
let pte = &mut ppn.get_pte_array()[idxs[i]];
for (i, idx) in idxs.iter().enumerate() {
let pte = &mut ppn.get_pte_array()[*idx];
if i == 2 {
result = Some(pte);
break;
@ -102,12 +92,12 @@ impl PageTable {
}
result
}
fn find_pte(&self, vpn: VirtPageNum) -> Option<&PageTableEntry> {
fn find_pte(&self, vpn: VirtPageNum) -> Option<&mut PageTableEntry> {
let idxs = vpn.indexes();
let mut ppn = self.root_ppn;
let mut result: Option<&PageTableEntry> = None;
for i in 0..3 {
let pte = &ppn.get_pte_array()[idxs[i]];
let mut result: Option<&mut PageTableEntry> = None;
for (i, idx) in idxs.iter().enumerate() {
let pte = &mut ppn.get_pte_array()[*idx];
if i == 2 {
result = Some(pte);
break;
@ -127,22 +117,20 @@ impl PageTable {
}
#[allow(unused)]
pub fn unmap(&mut self, vpn: VirtPageNum) {
let pte = self.find_pte_create(vpn).unwrap();
let pte = self.find_pte(vpn).unwrap();
assert!(pte.is_valid(), "vpn {:?} is invalid before unmapping", vpn);
*pte = PageTableEntry::empty();
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.find_pte(vpn)
.map(|pte| {pte.clone()})
self.find_pte(vpn).map(|pte| *pte)
}
pub fn translate_va(&self, va: VirtAddr) -> Option<PhysAddr> {
self.find_pte(va.clone().floor())
.map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
self.find_pte(va.clone().floor()).map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
}
pub fn token(&self) -> usize {
8usize << 60 | self.root_ppn.0
@ -157,10 +145,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
while start < end {
let start_va = VirtAddr::from(start);
let mut vpn = start_va.floor();
let ppn = page_table
.translate(vpn)
.unwrap()
.ppn();
let ppn = page_table.translate(vpn).unwrap().ppn();
vpn.step();
let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end));
@ -174,26 +159,40 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
v
}
/// Load a string from other address spaces into kernel space without an end `\0`.
pub fn translated_str(token: usize, ptr: *const u8) -> String {
let page_table = PageTable::from_token(token);
let mut string = String::new();
let mut va = ptr as usize;
loop {
let ch: u8 = *(page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut());
let ch: u8 = *(page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut());
if ch == 0 {
break;
} else {
string.push(ch as char);
va += 1;
}
string.push(ch as char);
va += 1;
}
string
}
pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
let page_table = PageTable::from_token(token);
page_table
.translate_va(VirtAddr::from(ptr as usize))
.unwrap()
.get_ref()
}
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
let page_table = PageTable::from_token(token);
let va = ptr as usize;
page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut()
page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut()
}
pub struct UserBuffer {
@ -247,4 +246,4 @@ impl Iterator for UserBufferIterator {
Some(r)
}
}
}
}
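A usage sketch of the helpers above, mirroring how sys_write and sys_read use them later in this diff: user memory is translated page by page and then handled as a list of byte slices.
// token identifies the user address space; ptr and len come from syscall arguments
let mut copied: alloc::vec::Vec<u8> = alloc::vec::Vec::new();
for segment in translated_byte_buffer(token, ptr, len) {
    // each segment lies within a single physical page
    copied.extend_from_slice(segment);
}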

@ -1,5 +1,7 @@
#![allow(unused)]
use core::arch::asm;
const SBI_SET_TIMER: usize = 0;
const SBI_CONSOLE_PUTCHAR: usize = 1;
const SBI_CONSOLE_GETCHAR: usize = 2;
@ -14,11 +16,12 @@ const SBI_SHUTDOWN: usize = 8;
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let mut ret;
unsafe {
llvm_asm!("ecall"
: "={x10}" (ret)
: "{x10}" (arg0), "{x11}" (arg1), "{x12}" (arg2), "{x17}" (which)
: "memory"
: "volatile"
core::arch::asm!(
"ecall",
inlateout("x10") arg0 => ret,
in("x11") arg1,
in("x12") arg2,
in("x17") which,
);
}
ret
@ -36,8 +39,9 @@ pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
use crate::board::QEMUExit;
pub fn shutdown(exit_code: usize) -> ! {
//sbi_call(SBI_SHUTDOWN, exit_code, 0, 0);
crate::board::QEMU_EXIT_HANDLE.exit_failure();
panic!("It should shutdown!");
}
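For reference, the old llvm_asm! constraints map onto the new asm! operands as follows; asm! assumes memory clobbers and side effects by default, so the explicit "memory"/"volatile" options are no longer needed:
//   "={x10}"(ret) together with "{x10}"(arg0)  ->  inlateout("x10") arg0 => ret
//   "{x11}"(arg1)                              ->  in("x11") arg1
//   "{x12}"(arg2)                              ->  in("x12") arg2
//   "{x17}"(which)                             ->  in("x17") which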

@ -0,0 +1,58 @@
use crate::sync::{Mutex, UPIntrFreeCell};
use crate::task::{
add_task, block_current_and_run_next, block_current_task, current_task, TaskContext,
TaskControlBlock,
};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Condvar {
pub inner: UPIntrFreeCell<CondvarInner>,
}
pub struct CondvarInner {
pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl Condvar {
pub fn new() -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(CondvarInner {
wait_queue: VecDeque::new(),
})
},
}
}
pub fn signal(&self) {
let mut inner = self.inner.exclusive_access();
if let Some(task) = inner.wait_queue.pop_front() {
add_task(task);
}
}
/*
pub fn wait(&self) {
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_and_run_next();
}
*/
pub fn wait_no_sched(&self) -> *mut TaskContext {
self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap());
});
block_current_task()
}
pub fn wait_with_mutex(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock();
self.inner.exclusive_session(|inner| {
inner.wait_queue.push_back(current_task().unwrap());
});
block_current_and_run_next();
mutex.lock();
}
}
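A sketch of how wait_with_mutex above is meant to be used (this is the pattern behind sys_condvar_wait later in this diff); condition_holds() stands in for an application-specific predicate:
mutex.lock();
while !condition_holds() {
    // unlocks the mutex, blocks this task until signal(), then re-locks the mutex
    condvar.wait_with_mutex(Arc::clone(&mutex));
}
// the condition holds and the mutex is still held here
mutex.unlock();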

@ -0,0 +1,9 @@
mod condvar;
mod mutex;
mod semaphore;
mod up;
pub use condvar::Condvar;
pub use mutex::{Mutex, MutexBlocking, MutexSpin};
pub use semaphore::Semaphore;
pub use up::{UPIntrFreeCell, UPIntrRefMut};

@ -0,0 +1,88 @@
use super::UPIntrFreeCell;
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use alloc::{collections::VecDeque, sync::Arc};
pub trait Mutex: Sync + Send {
fn lock(&self);
fn unlock(&self);
}
pub struct MutexSpin {
locked: UPIntrFreeCell<bool>,
}
impl MutexSpin {
pub fn new() -> Self {
Self {
locked: unsafe { UPIntrFreeCell::new(false) },
}
}
}
impl Mutex for MutexSpin {
fn lock(&self) {
loop {
let mut locked = self.locked.exclusive_access();
if *locked {
drop(locked);
suspend_current_and_run_next();
continue;
} else {
*locked = true;
return;
}
}
}
fn unlock(&self) {
let mut locked = self.locked.exclusive_access();
*locked = false;
}
}
pub struct MutexBlocking {
inner: UPIntrFreeCell<MutexBlockingInner>,
}
pub struct MutexBlockingInner {
locked: bool,
wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl MutexBlocking {
pub fn new() -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(MutexBlockingInner {
locked: false,
wait_queue: VecDeque::new(),
})
},
}
}
}
impl Mutex for MutexBlocking {
fn lock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
if mutex_inner.locked {
mutex_inner.wait_queue.push_back(current_task().unwrap());
drop(mutex_inner);
block_current_and_run_next();
} else {
mutex_inner.locked = true;
}
}
fn unlock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
assert!(mutex_inner.locked);
if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
add_task(waking_task);
} else {
mutex_inner.locked = false;
}
}
}

@ -0,0 +1,45 @@
use crate::sync::UPIntrFreeCell;
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Semaphore {
pub inner: UPIntrFreeCell<SemaphoreInner>,
}
pub struct SemaphoreInner {
pub count: isize,
pub wait_queue: VecDeque<Arc<TaskControlBlock>>,
}
impl Semaphore {
pub fn new(res_count: usize) -> Self {
Self {
inner: unsafe {
UPIntrFreeCell::new(SemaphoreInner {
count: res_count as isize,
wait_queue: VecDeque::new(),
})
},
}
}
pub fn up(&self) {
let mut inner = self.inner.exclusive_access();
inner.count += 1;
if inner.count <= 0 {
if let Some(task) = inner.wait_queue.pop_front() {
add_task(task);
}
}
}
pub fn down(&self) {
let mut inner = self.inner.exclusive_access();
inner.count -= 1;
if inner.count < 0 {
inner.wait_queue.push_back(current_task().unwrap());
drop(inner);
block_current_and_run_next();
}
}
}
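A worked example of the counter semantics above, assuming res_count = 1:
//   T1 down(): count 1 -> 0,  T1 proceeds
//   T2 down(): count 0 -> -1, T2 is queued on wait_queue and blocked
//   T1 up():   count -1 -> 0, count <= 0 so T2 is popped and re-added to the
//              ready queue; a negative count equals the number of blocked waiters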

@ -0,0 +1,140 @@
use core::cell::{RefCell, RefMut, UnsafeCell};
use core::ops::{Deref, DerefMut};
use lazy_static::*;
use riscv::register::sstatus;
/*
/// Wrap a static data structure inside it so that we are
/// able to access it without any `unsafe`.
///
/// We should only use it in uniprocessor.
///
/// In order to get mutable reference of inner data, call
/// `exclusive_access`.
pub struct UPSafeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPSafeCell<T> {}
impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
*/
pub struct UPSafeCellRaw<T> {
inner: UnsafeCell<T>,
}
unsafe impl<T> Sync for UPSafeCellRaw<T> {}
impl<T> UPSafeCellRaw<T> {
pub unsafe fn new(value: T) -> Self {
Self {
inner: UnsafeCell::new(value),
}
}
pub fn get_mut(&self) -> &mut T {
unsafe { &mut (*self.inner.get()) }
}
}
pub struct IntrMaskingInfo {
nested_level: usize,
sie_before_masking: bool,
}
lazy_static! {
static ref INTR_MASKING_INFO: UPSafeCellRaw<IntrMaskingInfo> =
unsafe { UPSafeCellRaw::new(IntrMaskingInfo::new()) };
}
impl IntrMaskingInfo {
pub fn new() -> Self {
Self {
nested_level: 0,
sie_before_masking: false,
}
}
pub fn enter(&mut self) {
let sie = sstatus::read().sie();
unsafe {
sstatus::clear_sie();
}
if self.nested_level == 0 {
self.sie_before_masking = sie;
}
self.nested_level += 1;
}
pub fn exit(&mut self) {
self.nested_level -= 1;
if self.nested_level == 0 && self.sie_before_masking {
unsafe {
sstatus::set_sie();
}
}
}
}
pub struct UPIntrFreeCell<T> {
/// inner data
inner: RefCell<T>,
}
unsafe impl<T> Sync for UPIntrFreeCell<T> {}
pub struct UPIntrRefMut<'a, T>(Option<RefMut<'a, T>>);
impl<T> UPIntrFreeCell<T> {
pub unsafe fn new(value: T) -> Self {
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> UPIntrRefMut<'_, T> {
INTR_MASKING_INFO.get_mut().enter();
UPIntrRefMut(Some(self.inner.borrow_mut()))
}
pub fn exclusive_session<F, V>(&self, f: F) -> V
where
F: FnOnce(&mut T) -> V,
{
let mut inner = self.exclusive_access();
f(inner.deref_mut())
}
}
impl<'a, T> Drop for UPIntrRefMut<'a, T> {
fn drop(&mut self) {
self.0 = None;
INTR_MASKING_INFO.get_mut().exit();
}
}
impl<'a, T> Deref for UPIntrRefMut<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
self.0.as_ref().unwrap().deref()
}
}
impl<'a, T> DerefMut for UPIntrRefMut<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.0.as_mut().unwrap().deref_mut()
}
}
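A minimal sketch of the two access patterns above, assuming some cell: UPIntrFreeCell<usize>; S-mode interrupts stay masked while the borrow is alive, with nesting tracked by IntrMaskingInfo:
let mut v = cell.exclusive_access(); // enter(): sie cleared on the outermost borrow
*v += 1;
drop(v);                             // exit(): sie restored if it was set before
cell.exclusive_session(|v| *v += 1); // scoped form, as used by Condvar::wait_no_sched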

@ -1,21 +1,23 @@
use crate::mm::{UserBuffer, translated_byte_buffer, translated_refmut};
use crate::task::{current_user_token, current_task};
use crate::fs::{make_pipe};
use crate::fs::{make_pipe, open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_refmut, translated_str, UserBuffer};
use crate::task::{current_process, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let task = current_task().unwrap();
let inner = task.acquire_inner_lock();
let process = current_process();
let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
if let Some(file) = &inner.fd_table[fd] {
if !file.writable() {
return -1;
}
let file = file.clone();
// release Task lock manually to avoid deadlock
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.write(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.write(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -23,26 +25,41 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
let token = current_user_token();
let task = current_task().unwrap();
let inner = task.acquire_inner_lock();
let process = current_process();
let inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
if let Some(file) = &inner.fd_table[fd] {
let file = file.clone();
// release Task lock manually to avoid deadlock
if !file.readable() {
return -1;
}
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.read(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.read(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
}
pub fn sys_open(path: *const u8, flags: u32) -> isize {
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) {
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);
fd as isize
} else {
-1
}
}
pub fn sys_close(fd: usize) -> isize {
let task = current_task().unwrap();
let mut inner = task.acquire_inner_lock();
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
@ -54,9 +71,9 @@ pub fn sys_close(fd: usize) -> isize {
}
pub fn sys_pipe(pipe: *mut usize) -> isize {
let task = current_task().unwrap();
let process = current_process();
let token = current_user_token();
let mut inner = task.acquire_inner_lock();
let mut inner = process.inner_exclusive_access();
let (pipe_read, pipe_write) = make_pipe();
let read_fd = inner.alloc_fd();
inner.fd_table[read_fd] = Some(pipe_read);
@ -65,4 +82,18 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
*translated_refmut(token, pipe) = read_fd;
*translated_refmut(token, unsafe { pipe.add(1) }) = write_fd;
0
}
}
pub fn sys_dup(fd: usize) -> isize {
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
return -1;
}
if inner.fd_table[fd].is_none() {
return -1;
}
let new_fd = inner.alloc_fd();
inner.fd_table[new_fd] = Some(Arc::clone(inner.fd_table[fd].as_ref().unwrap()));
new_fd as isize
}
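The user-side view of sys_pipe above, as a sketch; the pipe() wrapper name follows the tutorial's user_lib and is an assumption here:
let mut pipe_fd = [0usize; 2];
pipe(&mut pipe_fd);
// pipe_fd[0] is the read end and pipe_fd[1] the write end, matching the order
// in which sys_pipe fills *pipe and *(pipe + 1)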

@ -1,35 +1,70 @@
const SYSCALL_DUP: usize = 24;
const SYSCALL_OPEN: usize = 56;
const SYSCALL_CLOSE: usize = 57;
const SYSCALL_PIPE: usize = 59;
const SYSCALL_READ: usize = 63;
const SYSCALL_WRITE: usize = 64;
const SYSCALL_EXIT: usize = 93;
const SYSCALL_SLEEP: usize = 101;
const SYSCALL_YIELD: usize = 124;
const SYSCALL_KILL: usize = 129;
const SYSCALL_GET_TIME: usize = 169;
const SYSCALL_GETPID: usize = 172;
const SYSCALL_FORK: usize = 220;
const SYSCALL_EXEC: usize = 221;
const SYSCALL_WAITPID: usize = 260;
const SYSCALL_THREAD_CREATE: usize = 1000;
const SYSCALL_GETTID: usize = 1001;
const SYSCALL_WAITTID: usize = 1002;
const SYSCALL_MUTEX_CREATE: usize = 1010;
const SYSCALL_MUTEX_LOCK: usize = 1011;
const SYSCALL_MUTEX_UNLOCK: usize = 1012;
const SYSCALL_SEMAPHORE_CREATE: usize = 1020;
const SYSCALL_SEMAPHORE_UP: usize = 1021;
const SYSCALL_SEMAPHORE_DOWN: usize = 1022;
const SYSCALL_CONDVAR_CREATE: usize = 1030;
const SYSCALL_CONDVAR_SIGNAL: usize = 1031;
const SYSCALL_CONDVAR_WAIT: usize = 1032;
mod fs;
mod process;
mod sync;
mod thread;
use fs::*;
use process::*;
use sync::*;
use thread::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_DUP => sys_dup(args[0]),
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
SYSCALL_CLOSE => sys_close(args[0]),
SYSCALL_PIPE => sys_pipe(args[0] as *mut usize),
SYSCALL_READ => sys_read(args[0], args[1] as *const u8, args[2]),
SYSCALL_WRITE => sys_write(args[0], args[1] as *const u8, args[2]),
SYSCALL_EXIT => sys_exit(args[0] as i32),
SYSCALL_SLEEP => sys_sleep(args[0]),
SYSCALL_YIELD => sys_yield(),
SYSCALL_KILL => sys_kill(args[0], args[1] as u32),
SYSCALL_GET_TIME => sys_get_time(),
SYSCALL_GETPID => sys_getpid(),
SYSCALL_FORK => sys_fork(),
SYSCALL_EXEC => sys_exec(args[0] as *const u8),
SYSCALL_EXEC => sys_exec(args[0] as *const u8, args[1] as *const usize),
SYSCALL_WAITPID => sys_waitpid(args[0] as isize, args[1] as *mut i32),
SYSCALL_THREAD_CREATE => sys_thread_create(args[0], args[1]),
SYSCALL_GETTID => sys_gettid(),
SYSCALL_WAITTID => sys_waittid(args[0]) as isize,
SYSCALL_MUTEX_CREATE => sys_mutex_create(args[0] == 1),
SYSCALL_MUTEX_LOCK => sys_mutex_lock(args[0]),
SYSCALL_MUTEX_UNLOCK => sys_mutex_unlock(args[0]),
SYSCALL_SEMAPHORE_CREATE => sys_semaphore_create(args[0]),
SYSCALL_SEMAPHORE_UP => sys_semaphore_up(args[0]),
SYSCALL_SEMAPHORE_DOWN => sys_semaphore_down(args[0]),
SYSCALL_CONDVAR_CREATE => sys_condvar_create(args[0]),
SYSCALL_CONDVAR_SIGNAL => sys_condvar_signal(args[0]),
SYSCALL_CONDVAR_WAIT => sys_condvar_wait(args[0], args[1]),
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}

@ -1,17 +1,13 @@
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_ref, translated_refmut, translated_str};
use crate::task::{
suspend_current_and_run_next,
exit_current_and_run_next,
current_task,
current_user_token,
add_task,
current_process, current_task, current_user_token, exit_current_and_run_next, pid2process,
suspend_current_and_run_next, SignalFlags,
};
use crate::timer::get_time_ms;
use crate::mm::{
translated_str,
translated_refmut,
};
use crate::loader::get_app_data_by_name;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
pub fn sys_exit(exit_code: i32) -> ! {
exit_current_and_run_next(exit_code);
@ -28,30 +24,44 @@ pub fn sys_get_time() -> isize {
}
pub fn sys_getpid() -> isize {
current_task().unwrap().pid.0 as isize
current_task().unwrap().process.upgrade().unwrap().getpid() as isize
}
pub fn sys_fork() -> isize {
let current_task = current_task().unwrap();
let new_task = current_task.fork();
let new_pid = new_task.pid.0;
let current_process = current_process();
let new_process = current_process.fork();
let new_pid = new_process.getpid();
// modify trap context of new_task, because it returns immediately after switching
let trap_cx = new_task.acquire_inner_lock().get_trap_cx();
let new_process_inner = new_process.inner_exclusive_access();
let task = new_process_inner.tasks[0].as_ref().unwrap();
let trap_cx = task.inner_exclusive_access().get_trap_cx();
// we do not have to move to next instruction since we have done it before
// for child process, fork returns 0
trap_cx.x[10] = 0;
// add new task to scheduler
add_task(new_task);
new_pid as isize
}
pub fn sys_exec(path: *const u8) -> isize {
pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
let token = current_user_token();
let path = translated_str(token, path);
if let Some(data) = get_app_data_by_name(path.as_str()) {
let task = current_task().unwrap();
task.exec(data);
0
let mut args_vec: Vec<String> = Vec::new();
loop {
let arg_str_ptr = *translated_ref(token, args);
if arg_str_ptr == 0 {
break;
}
args_vec.push(translated_str(token, arg_str_ptr as *const u8));
unsafe {
args = args.add(1);
}
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
let process = current_process();
let argc = args_vec.len();
process.exec(all_data.as_slice(), args_vec);
// return argc because cx.x[10] will be overwritten with it later
argc as isize
} else {
-1
}
@ -60,38 +70,48 @@ pub fn sys_exec(path: *const u8) -> isize {
/// If there is no child process whose pid is the same as the given one, return -1.
/// Else if there is a child process but it is still running, return -2.
pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
let task = current_task().unwrap();
let process = current_process();
// find a child process
// ---- hold current PCB lock
let mut inner = task.acquire_inner_lock();
if inner.children
let mut inner = process.inner_exclusive_access();
if !inner
.children
.iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()})
.is_none() {
.any(|p| pid == -1 || pid as usize == p.getpid())
{
return -1;
// ---- release current PCB lock
// ---- release current PCB
}
let pair = inner.children
.iter()
.enumerate()
.find(|(_, p)| {
// ++++ temporarily hold child PCB lock
p.acquire_inner_lock().is_zombie() && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB lock
});
let pair = inner.children.iter().enumerate().find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
let child = inner.children.remove(idx);
// confirm that child will be deallocated after removing from children list
// confirm that child will be deallocated after being removed from children list
assert_eq!(Arc::strong_count(&child), 1);
let found_pid = child.getpid();
// ++++ temporarily hold child lock
let exit_code = child.acquire_inner_lock().exit_code;
// ++++ release child PCB lock
// ++++ temporarily access child PCB exclusively
let exit_code = child.inner_exclusive_access().exit_code;
// ++++ release child PCB
*translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code;
found_pid as isize
} else {
-2
}
// ---- release current PCB lock automatically
}
// ---- release current PCB automatically
}
pub fn sys_kill(pid: usize, signal: u32) -> isize {
if let Some(process) = pid2process(pid) {
if let Some(flag) = SignalFlags::from_bits(signal) {
process.inner_exclusive_access().signals |= flag;
0
} else {
-1
}
} else {
-1
}
}
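The argument-passing convention that sys_exec above expects from user space, summarized from the translation loop:
//   args  ──► [ &arg0, &arg1, ..., 0 ]   array of usize pointers, terminated by 0
//   &argN ──► "argN\0"                   each argument is a NUL-terminated string
// Every string is copied into args_vec before exec; the syscall then returns argc,
// which later overwrites a0 (cx.x[10]) in the new address space's trap context.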

@ -0,0 +1,134 @@
use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
use crate::task::{block_current_and_run_next, current_process, current_task};
use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
pub fn sys_sleep(ms: usize) -> isize {
let expire_ms = get_time_ms() + ms;
let task = current_task().unwrap();
add_timer(expire_ms, task);
block_current_and_run_next();
0
}
pub fn sys_mutex_create(blocking: bool) -> isize {
let process = current_process();
let mutex: Option<Arc<dyn Mutex>> = if !blocking {
Some(Arc::new(MutexSpin::new()))
} else {
Some(Arc::new(MutexBlocking::new()))
};
let mut process_inner = process.inner_exclusive_access();
if let Some(id) = process_inner
.mutex_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.mutex_list[id] = mutex;
id as isize
} else {
process_inner.mutex_list.push(mutex);
process_inner.mutex_list.len() as isize - 1
}
}
pub fn sys_mutex_lock(mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
drop(process);
mutex.lock();
0
}
pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
drop(process);
mutex.unlock();
0
}
pub fn sys_semaphore_create(res_count: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
.semaphore_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
id
} else {
process_inner
.semaphore_list
.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner.semaphore_list.len() - 1
};
id as isize
}
pub fn sys_semaphore_up(sem_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
drop(process_inner);
sem.up();
0
}
pub fn sys_semaphore_down(sem_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
drop(process_inner);
sem.down();
0
}
pub fn sys_condvar_create(_arg: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
.condvar_list
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id)
{
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
id
} else {
process_inner
.condvar_list
.push(Some(Arc::new(Condvar::new())));
process_inner.condvar_list.len() - 1
};
id as isize
}
pub fn sys_condvar_signal(condvar_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
drop(process_inner);
condvar.signal();
0
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
drop(process_inner);
condvar.wait_with_mutex(mutex);
0
}

@ -0,0 +1,85 @@
use crate::{
mm::kernel_token,
task::{add_task, current_task, TaskControlBlock},
trap::{trap_handler, TrapContext},
};
use alloc::sync::Arc;
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
// create a new thread
let new_task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
task.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base,
true,
));
// add new task to scheduler
add_task(Arc::clone(&new_task));
let new_task_inner = new_task.inner_exclusive_access();
let new_task_res = new_task_inner.res.as_ref().unwrap();
let new_task_tid = new_task_res.tid;
let mut process_inner = process.inner_exclusive_access();
// add new thread to current process
let tasks = &mut process_inner.tasks;
while tasks.len() < new_task_tid + 1 {
tasks.push(None);
}
tasks[new_task_tid] = Some(Arc::clone(&new_task));
let new_task_trap_cx = new_task_inner.get_trap_cx();
*new_task_trap_cx = TrapContext::app_init_context(
entry,
new_task_res.ustack_top(),
kernel_token(),
new_task.kstack.get_top(),
trap_handler as usize,
);
(*new_task_trap_cx).x[10] = arg;
new_task_tid as isize
}
pub fn sys_gettid() -> isize {
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.tid as isize
}
/// thread does not exist, return -1
/// thread has not exited yet, return -2
/// otherwise, return thread's exit code
pub fn sys_waittid(tid: usize) -> i32 {
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
let task_inner = task.inner_exclusive_access();
let mut process_inner = process.inner_exclusive_access();
// a thread cannot wait for itself
if task_inner.res.as_ref().unwrap().tid == tid {
return -1;
}
let mut exit_code: Option<i32> = None;
let waited_task = process_inner.tasks[tid].as_ref();
if let Some(waited_task) = waited_task {
if let Some(waited_exit_code) = waited_task.inner_exclusive_access().exit_code {
exit_code = Some(waited_exit_code);
}
} else {
// waited thread does not exist
return -1;
}
if let Some(exit_code) = exit_code {
// dealloc the exited thread
process_inner.tasks[tid] = None;
exit_code
} else {
// waited thread has not exited
-2
}
}
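A hedged user-side sketch of the thread syscalls above; thread_create and waittid follow the tutorial's user_lib wrappers and are assumptions here, and worker is a hypothetical user function:
// start a thread at `worker` with 42 passed in a0, then reap it
let tid = thread_create(worker as usize, 42) as usize;
let code = waittid(tid);
// waittid: -1 if the thread does not exist (or is the caller itself),
// -2 if it has not exited yet, otherwise its exit code (the tid slot is then freed)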

@ -3,15 +3,23 @@ use crate::trap::trap_return;
#[repr(C)]
pub struct TaskContext {
ra: usize,
sp: usize,
s: [usize; 12],
}
impl TaskContext {
pub fn goto_trap_return() -> Self {
pub fn zero_init() -> Self {
Self {
ra: 0,
sp: 0,
s: [0; 12],
}
}
pub fn goto_trap_return(kstack_ptr: usize) -> Self {
Self {
ra: trap_return as usize,
sp: kstack_ptr,
s: [0; 12],
}
}
}

@ -0,0 +1,225 @@
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPIntrFreeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
pub struct RecycleAllocator {
current: usize,
recycled: Vec<usize>,
}
impl RecycleAllocator {
pub fn new() -> Self {
RecycleAllocator {
current: 0,
recycled: Vec::new(),
}
}
pub fn alloc(&mut self) -> usize {
if let Some(id) = self.recycled.pop() {
id
} else {
self.current += 1;
self.current - 1
}
}
pub fn dealloc(&mut self, id: usize) {
assert!(id < self.current);
assert!(
!self.recycled.iter().any(|i| *i == id),
"id {} has been deallocated!",
id
);
self.recycled.push(id);
}
}
lazy_static! {
static ref PID_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
}
pub const IDLE_PID: usize = 0;
pub struct PidHandle(pub usize);
pub fn pid_alloc() -> PidHandle {
PidHandle(PID_ALLOCATOR.exclusive_access().alloc())
}
impl Drop for PidHandle {
fn drop(&mut self) {
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
}
}
/// Return (bottom, top) of a kernel stack in kernel space.
pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
let top = TRAMPOLINE - kstack_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack(kstack_id)
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.exclusive_access()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}
impl KernelStack {
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.0);
kernel_stack_top
}
}
pub struct TaskUserRes {
pub tid: usize,
pub ustack_base: usize,
pub process: Weak<ProcessControlBlock>,
}
fn trap_cx_bottom_from_tid(tid: usize) -> usize {
TRAP_CONTEXT_BASE - tid * PAGE_SIZE
}
fn ustack_bottom_from_tid(ustack_base: usize, tid: usize) -> usize {
ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE)
}
impl TaskUserRes {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let tid = process.inner_exclusive_access().alloc_tid();
let task_user_res = Self {
tid,
ustack_base,
process: Arc::downgrade(&process),
};
if alloc_user_res {
task_user_res.alloc_user_res();
}
task_user_res
}
pub fn alloc_user_res(&self) {
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner.memory_set.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner.memory_set.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
fn dealloc_user_res(&self) {
// dealloc tid
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
#[allow(unused)]
pub fn alloc_tid(&mut self) {
self.tid = self
.process
.upgrade()
.unwrap()
.inner_exclusive_access()
.alloc_tid();
}
pub fn dealloc_tid(&self) {
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
process_inner.dealloc_tid(self.tid);
}
pub fn trap_cx_user_va(&self) -> usize {
trap_cx_bottom_from_tid(self.tid)
}
pub fn trap_cx_ppn(&self) -> PhysPageNum {
let process = self.process.upgrade().unwrap();
let process_inner = process.inner_exclusive_access();
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner
.memory_set
.translate(trap_cx_bottom_va.into())
.unwrap()
.ppn()
}
pub fn ustack_base(&self) -> usize {
self.ustack_base
}
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
}
impl Drop for TaskUserRes {
fn drop(&mut self) {
self.dealloc_tid();
self.dealloc_user_res();
}
}
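The address-space layout implied by the helpers above, summarized; TRAMPOLINE, TRAP_CONTEXT_BASE, KERNEL_STACK_SIZE, USER_STACK_SIZE and PAGE_SIZE are the config.rs constants imported at the top of the file:
// kernel space (kernel_stack_position): each kernel stack occupies
//   KERNEL_STACK_SIZE bytes just below TRAMPOLINE, with one unmapped guard
//   page between consecutive stacks
// user space, per thread tid:
//   trap context page at TRAP_CONTEXT_BASE - tid * PAGE_SIZE
//   user stack bottom at ustack_base + tid * (PAGE_SIZE + USER_STACK_SIZE),
//   growing upward by USER_STACK_SIZE, leaving a guard page between
//   neighbouring thread stacks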

@ -1,7 +1,7 @@
use super::TaskControlBlock;
use alloc::collections::VecDeque;
use super::{ProcessControlBlock, TaskControlBlock};
use crate::sync::UPIntrFreeCell;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::sync::Arc;
use spin::Mutex;
use lazy_static::*;
pub struct TaskManager {
@ -11,7 +11,9 @@ pub struct TaskManager {
/// A simple FIFO scheduler.
impl TaskManager {
pub fn new() -> Self {
Self { ready_queue: VecDeque::new(), }
Self {
ready_queue: VecDeque::new(),
}
}
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
@ -22,13 +24,32 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: Mutex<TaskManager> = Mutex::new(TaskManager::new());
pub static ref TASK_MANAGER: UPIntrFreeCell<TaskManager> =
unsafe { UPIntrFreeCell::new(TaskManager::new()) };
pub static ref PID2PCB: UPIntrFreeCell<BTreeMap<usize, Arc<ProcessControlBlock>>> =
unsafe { UPIntrFreeCell::new(BTreeMap::new()) };
}
pub fn add_task(task: Arc<TaskControlBlock>) {
TASK_MANAGER.lock().add(task);
TASK_MANAGER.exclusive_access().add(task);
}
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
TASK_MANAGER.lock().fetch()
}
TASK_MANAGER.exclusive_access().fetch()
}
pub fn pid2process(pid: usize) -> Option<Arc<ProcessControlBlock>> {
let map = PID2PCB.exclusive_access();
map.get(&pid).map(Arc::clone)
}
pub fn insert_into_pid2process(pid: usize, process: Arc<ProcessControlBlock>) {
PID2PCB.exclusive_access().insert(pid, process);
}
pub fn remove_from_pid2process(pid: usize) {
let mut map = PID2PCB.exclusive_access();
if map.remove(&pid).is_none() {
panic!("cannot find pid {} in pid2task!", pid);
}
}
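TASK_MANAGER above stays a plain FIFO of ready tasks; the new PID2PCB map only exists so that syscalls can find a ProcessControlBlock by pid and so that exit can unregister it exactly once. A standalone approximation of that usage with a plain std BTreeMap (the kernel wraps the real map in UPIntrFreeCell); Pcb and the pid values here are hypothetical:

use std::collections::BTreeMap;
use std::sync::Arc;

// Stand-in for ProcessControlBlock; only the pid matters for the sketch.
struct Pcb {
    pid: usize,
}

fn main() {
    let mut pid2pcb: BTreeMap<usize, Arc<Pcb>> = BTreeMap::new();

    // process creation: register the new PCB under its pid
    let child = Arc::new(Pcb { pid: 1 });
    pid2pcb.insert(child.pid, Arc::clone(&child));

    // a lookup by pid, as a waitpid/kill-style syscall would do
    assert!(pid2pcb.get(&1).map(Arc::clone).is_some());

    // exit path: the entry must exist exactly once
    assert!(pid2pcb.remove(&1).is_some());
    assert!(pid2pcb.remove(&1).is_none());
}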

@ -1,41 +1,42 @@
mod context;
mod switch;
mod task;
mod id;
mod manager;
mod process;
mod processor;
mod pid;
mod signal;
mod switch;
#[allow(clippy::module_inception)]
mod task;
use crate::loader::{get_app_data_by_name};
use switch::__switch;
use task::{TaskControlBlock, TaskStatus};
use alloc::sync::Arc;
use manager::fetch_task;
use self::id::TaskUserRes;
use crate::fs::{open_file, OpenFlags};
use alloc::{sync::Arc, vec::Vec};
use lazy_static::*;
use manager::fetch_task;
use process::ProcessControlBlock;
use switch::__switch;
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle, IDLE_PID};
pub use manager::{add_task, pid2process, remove_from_pid2process};
pub use processor::{
run_tasks,
current_task,
current_user_token,
current_trap_cx,
take_current_task,
schedule,
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
};
pub use manager::add_task;
pub use pid::{PidHandle, pid_alloc, KernelStack};
pub use signal::SignalFlags;
pub use task::{TaskControlBlock, TaskStatus};
pub fn suspend_current_and_run_next() {
// There must be an application running.
let task = take_current_task().unwrap();
// ---- temporarily hold current PCB lock
let task_cx_ptr = task.acquire_inner_lock().get_task_cx_ptr2();
// ---- release current PCB lock
// ++++ temporarily hold current PCB lock
// ---- access current TCB exclusively
let mut task_inner = task.inner_exclusive_access();
let task_cx_ptr = &mut task_inner.task_cx as *mut TaskContext;
// Change status to Ready
task.acquire_inner_lock().task_status = TaskStatus::Ready;
// ++++ release current PCB lock
task_inner.task_status = TaskStatus::Ready;
drop(task_inner);
// ---- release current TCB
// push back to ready queue.
add_task(task);
@ -43,44 +44,116 @@ pub fn suspend_current_and_run_next() {
schedule(task_cx_ptr);
}
/// This function must be followed by a schedule
pub fn block_current_task() -> *mut TaskContext {
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
task_inner.task_status = TaskStatus::Blocking;
&mut task_inner.task_cx as *mut TaskContext
}
pub fn block_current_and_run_next() {
let task_cx_ptr = block_current_task();
schedule(task_cx_ptr);
}
use crate::board::QEMUExit;
pub fn exit_current_and_run_next(exit_code: i32) {
// take from Processor
let task = take_current_task().unwrap();
// **** hold current PCB lock
let mut inner = task.acquire_inner_lock();
// Change status to Zombie
inner.task_status = TaskStatus::Zombie;
// Record exit code
inner.exit_code = exit_code;
// do not move to its parent but under initproc
// ++++++ hold initproc PCB lock here
{
let mut initproc_inner = INITPROC.acquire_inner_lock();
for child in inner.children.iter() {
initproc_inner.children.push(child.clone());
let mut task_inner = task.inner_exclusive_access();
let process = task.process.upgrade().unwrap();
let tid = task_inner.res.as_ref().unwrap().tid;
// record exit code
task_inner.exit_code = Some(exit_code);
task_inner.res = None;
// here we do not remove the thread since we are still using the kstack
// it will be deallocated when sys_waittid is called
drop(task_inner);
drop(task);
// however, if this is the main thread of current process
// the process should terminate at once
if tid == 0 {
let pid = process.getpid();
if pid == IDLE_PID {
println!(
"[kernel] Idle process exit with exit_code {} ...",
exit_code
);
if exit_code != 0 {
//crate::sbi::shutdown(255); //255 == -1 for err hint
crate::board::QEMU_EXIT_HANDLE.exit_failure();
} else {
//crate::sbi::shutdown(0); //0 for success hint
crate::board::QEMU_EXIT_HANDLE.exit_success();
}
}
remove_from_pid2process(pid);
let mut process_inner = process.inner_exclusive_access();
// mark this process as a zombie process
process_inner.is_zombie = true;
// record exit code of main process
process_inner.exit_code = exit_code;
{
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
}
// deallocate user res (including tid/trap_cx/ustack) of all threads
// it has to be done before we dealloc the whole memory_set
// otherwise they will be deallocated twice
let mut recycle_res = Vec::<TaskUserRes>::new();
for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
let task = task.as_ref().unwrap();
let mut task_inner = task.inner_exclusive_access();
if let Some(res) = task_inner.res.take() {
recycle_res.push(res);
}
}
// dealloc_tid and dealloc_user_res require access to the PCB inner, so we
// collect the TaskUserRes first and release process_inner before dropping
// them, to avoid a double borrow / deadlock on the PCB inner.
drop(process_inner);
recycle_res.clear();
let mut process_inner = process.inner_exclusive_access();
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
process_inner.fd_table.clear();
}
// ++++++ release parent PCB lock here
inner.children.clear();
// deallocate user space
inner.memory_set.recycle_data_pages();
drop(inner);
// **** release current PCB lock
// drop task manually to maintain rc correctly
drop(task);
drop(process);
// we do not have to save task context
let _unused: usize = 0;
schedule(&_unused as *const _);
let mut _unused = TaskContext::zero_init();
schedule(&mut _unused as *mut _);
}
lazy_static! {
pub static ref INITPROC: Arc<TaskControlBlock> = Arc::new(
TaskControlBlock::new(get_app_data_by_name("initproc").unwrap())
);
pub static ref INITPROC: Arc<ProcessControlBlock> = {
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all();
ProcessControlBlock::new(v.as_slice())
};
}
pub fn add_initproc() {
add_task(INITPROC.clone());
let _initproc = INITPROC.clone();
}
pub fn check_signals_of_current() -> Option<(i32, &'static str)> {
let process = current_process();
let process_inner = process.inner_exclusive_access();
process_inner.signals.check_error()
}
pub fn current_add_signal(signal: SignalFlags) {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
process_inner.signals |= signal;
}
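The recycle_res step above is needed because dropping a TaskUserRes re-enters the PCB inner (its Drop calls dealloc_tid and dealloc_user_res), which would be a second exclusive borrow while process_inner is still held. A minimal sketch of the same collect-then-release pattern with a plain RefCell and hypothetical names:

use std::cell::RefCell;

struct Inner {
    tids: Vec<usize>,
}

// Stand-in for TaskUserRes: its cleanup must borrow Inner again.
struct UserRes<'a> {
    tid: usize,
    inner: &'a RefCell<Inner>,
}

impl<'a> Drop for UserRes<'a> {
    fn drop(&mut self) {
        // would panic if Inner were still exclusively borrowed here
        self.inner.borrow_mut().tids.retain(|&t| t != self.tid);
    }
}

fn main() {
    let inner = RefCell::new(Inner { tids: vec![0, 1] });
    let mut recycle = Vec::new();
    {
        let _guard = inner.borrow_mut(); // plays the role of process_inner
        // collect the resources while the borrow is held ...
        recycle.push(UserRes { tid: 0, inner: &inner });
        recycle.push(UserRes { tid: 1, inner: &inner });
        // ... but do NOT drop them yet
    } // borrow released here, like `drop(process_inner)`
    recycle.clear(); // now each Drop may borrow again safely
    assert!(inner.borrow().tids.is_empty());
}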

@ -1,105 +0,0 @@
use alloc::vec::Vec;
use lazy_static::*;
use spin::Mutex;
use crate::mm::{KERNEL_SPACE, MapPermission, VirtAddr};
use crate::config::{
PAGE_SIZE,
TRAMPOLINE,
KERNEL_STACK_SIZE,
};
struct PidAllocator {
current: usize,
recycled: Vec<usize>,
}
impl PidAllocator {
pub fn new() -> Self {
PidAllocator {
current: 0,
recycled: Vec::new(),
}
}
pub fn alloc(&mut self) -> PidHandle {
if let Some(pid) = self.recycled.pop() {
PidHandle(pid)
} else {
self.current += 1;
PidHandle(self.current - 1)
}
}
pub fn dealloc(&mut self, pid: usize) {
assert!(pid < self.current);
assert!(
self.recycled.iter().find(|ppid| **ppid == pid).is_none(),
"pid {} has been deallocated!", pid
);
self.recycled.push(pid);
}
}
lazy_static! {
static ref PID_ALLOCATOR : Mutex<PidAllocator> = Mutex::new(PidAllocator::new());
}
pub struct PidHandle(pub usize);
impl Drop for PidHandle {
fn drop(&mut self) {
//println!("drop pid {}", self.0);
PID_ALLOCATOR.lock().dealloc(self.0);
}
}
pub fn pid_alloc() -> PidHandle {
PID_ALLOCATOR.lock().alloc()
}
/// Return (bottom, top) of a kernel stack in kernel space.
pub fn kernel_stack_position(app_id: usize) -> (usize, usize) {
let top = TRAMPOLINE - app_id * (KERNEL_STACK_SIZE + PAGE_SIZE);
let bottom = top - KERNEL_STACK_SIZE;
(bottom, top)
}
pub struct KernelStack {
pid: usize,
}
impl KernelStack {
pub fn new(pid_handle: &PidHandle) -> Self {
let pid = pid_handle.0;
let (kernel_stack_bottom, kernel_stack_top) = kernel_stack_position(pid);
KERNEL_SPACE
.lock()
.insert_framed_area(
kernel_stack_bottom.into(),
kernel_stack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack {
pid: pid_handle.0,
}
}
pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, {
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe { *ptr_mut = value; }
ptr_mut
}
pub fn get_top(&self) -> usize {
let (_, kernel_stack_top) = kernel_stack_position(self.pid);
kernel_stack_top
}
}
impl Drop for KernelStack {
fn drop(&mut self) {
let (kernel_stack_bottom, _) = kernel_stack_position(self.pid);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.lock()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
}
}
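The deleted PidAllocator above hands out increasing ids and reuses whatever was returned; the new id.rs generalises the same idea as RecycleAllocator for pids, kernel stacks and tids. A quick standalone sketch of that recycle behaviour (the allocator is re-declared locally here and is only an approximation of the real one):

struct RecycleAllocator {
    current: usize,
    recycled: Vec<usize>,
}

impl RecycleAllocator {
    fn new() -> Self {
        Self { current: 0, recycled: Vec::new() }
    }
    fn alloc(&mut self) -> usize {
        if let Some(id) = self.recycled.pop() {
            id
        } else {
            self.current += 1;
            self.current - 1
        }
    }
    fn dealloc(&mut self, id: usize) {
        assert!(id < self.current && !self.recycled.contains(&id));
        self.recycled.push(id);
    }
}

fn main() {
    let mut a = RecycleAllocator::new();
    let (p0, p1, p2) = (a.alloc(), a.alloc(), a.alloc()); // 0, 1, 2
    a.dealloc(p1);
    assert_eq!(a.alloc(), 1); // a recycled id comes back first
    assert_eq!(a.alloc(), 3); // then fresh ids continue
    assert_eq!((p0, p2), (0, 2));
}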

@ -0,0 +1,258 @@
use super::id::RecycleAllocator;
use super::manager::insert_into_pid2process;
use super::TaskControlBlock;
use super::{add_task, SignalFlags};
use super::{pid_alloc, PidHandle};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPIntrFreeCell, UPIntrRefMut};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
pub struct ProcessControlBlock {
// immutable
pub pid: PidHandle,
// mutable
inner: UPIntrFreeCell<ProcessControlBlockInner>,
}
pub struct ProcessControlBlockInner {
pub is_zombie: bool,
pub memory_set: MemorySet,
pub parent: Option<Weak<ProcessControlBlock>>,
pub children: Vec<Arc<ProcessControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub signals: SignalFlags,
pub tasks: Vec<Option<Arc<TaskControlBlock>>>,
pub task_res_allocator: RecycleAllocator,
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
#[allow(unused)]
pub fn get_user_token(&self) -> usize {
self.memory_set.token()
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
self.fd_table.len() - 1
}
}
pub fn alloc_tid(&mut self) -> usize {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize) {
self.task_res_allocator.dealloc(tid)
}
pub fn thread_count(&self) -> usize {
self.tasks.len()
}
pub fn get_task(&self, tid: usize) -> Arc<TaskControlBlock> {
self.tasks[tid].as_ref().unwrap().clone()
}
}
impl ProcessControlBlock {
pub fn inner_exclusive_access(&self) -> UPIntrRefMut<'_, ProcessControlBlockInner> {
self.inner.exclusive_access()
}
pub fn new(elf_data: &[u8]) -> Arc<Self> {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
// allocate a pid
let pid_handle = pid_alloc();
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe {
UPIntrFreeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
ustack_base,
true,
));
// prepare trap_cx of main thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
let ustack_top = task_inner.res.as_ref().unwrap().ustack_top();
let kstack_top = task.kstack.get_top();
drop(task_inner);
*trap_cx = TrapContext::app_init_context(
entry_point,
ustack_top,
KERNEL_SPACE.exclusive_access().token(),
kstack_top,
trap_handler as usize,
);
// add main thread to the process
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
insert_into_pid2process(process.getpid(), Arc::clone(&process));
// add main thread to scheduler
add_task(task);
process
}
/// Only supports processes with a single thread.
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let new_token = memory_set.token();
// substitute memory_set
self.inner_exclusive_access().memory_set = memory_set;
// then we alloc user resource for main thread again
// since memory_set has been changed
let task = self.inner_exclusive_access().get_task(0);
let mut task_inner = task.inner_exclusive_access();
task_inner.res.as_mut().unwrap().ustack_base = ustack_base;
task_inner.res.as_mut().unwrap().alloc_user_res();
task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn();
// push arguments on user stack
let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top();
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp;
let mut argv: Vec<_> = (0..=args.len())
.map(|arg| {
translated_refmut(
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
*argv[args.len()] = 0;
for i in 0..args.len() {
user_sp -= args[i].len() + 1;
*argv[i] = user_sp;
let mut p = user_sp;
for c in args[i].as_bytes() {
*translated_refmut(new_token, p as *mut u8) = *c;
p += 1;
}
*translated_refmut(new_token, p as *mut u8) = 0;
}
// make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>();
// initialize trap_cx
let mut trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.exclusive_access().token(),
task.kstack.get_top(),
trap_handler as usize,
);
trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base;
*task_inner.get_trap_cx() = trap_cx;
}
/// Only supports processes with a single thread.
pub fn fork(self: &Arc<Self>) -> Arc<Self> {
let mut parent = self.inner_exclusive_access();
assert_eq!(parent.thread_count(), 1);
// clone parent's memory_set completely including trampoline/ustacks/trap_cxs
let memory_set = MemorySet::from_existed_user(&parent.memory_set);
// alloc a pid
let pid = pid_alloc();
// copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent.fd_table.iter() {
if let Some(file) = fd {
new_fd_table.push(Some(file.clone()));
} else {
new_fd_table.push(None);
}
}
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe {
UPIntrFreeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
signals: SignalFlags::empty(),
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// add child
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent
.get_task(0)
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base(),
// here we do not allocate trap_cx or ustack again,
// but note that a new kstack is allocated here
false,
));
// attach task to child process
let mut child_inner = child.inner_exclusive_access();
child_inner.tasks.push(Some(Arc::clone(&task)));
drop(child_inner);
// modify kstack_top in trap_cx of this thread
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task.kstack.get_top();
drop(task_inner);
insert_into_pid2process(child.getpid(), Arc::clone(&child));
// add this thread to scheduler
add_task(task);
child
}
pub fn getpid(&self) -> usize {
self.pid.0
}
}
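exec above lays the arguments out on the new user stack: argc + 1 pointer slots just below the stack top (the last one zero), then each string NUL-terminated below them, and finally sp rounded down to an 8-byte boundary before it is handed to the trap context. A rough host-side replay of that layout arithmetic, with a made-up stack top and argument list:

// Illustrative replay of the argv layout on a pretend user stack.
fn main() {
    let args = ["hello", "world"];
    let ustack_top: usize = 0x8000; // hypothetical stack top
    let mut sp = ustack_top;

    // 1) reserve (argc + 1) usize slots for argv[..] plus the trailing 0
    sp -= (args.len() + 1) * core::mem::size_of::<usize>();
    let argv_base = sp;
    let mut argv = vec![0usize; args.len() + 1];

    // 2) place each string below argv, NUL-terminated, recording its address
    for (i, arg) in args.iter().enumerate() {
        sp -= arg.len() + 1;
        argv[i] = sp;
    }
    argv[args.len()] = 0;

    // 3) align sp to 8 bytes (required on the k210 port)
    sp -= sp % core::mem::size_of::<usize>();

    println!(
        "argv_base = {:#x}, argv = {:x?}, final sp = {:#x}",
        argv_base, argv, sp
    );
    // in the real kernel: trap_cx.x[10] = argc, trap_cx.x[11] = argv_base
}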

@ -1,93 +1,103 @@
use super::TaskControlBlock;
use alloc::sync::Arc;
use spin::Mutex;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
use super::__switch;
use super::{fetch_task, TaskStatus};
use super::{ProcessControlBlock, TaskContext, TaskControlBlock};
use crate::sync::UPIntrFreeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
pub struct Processor {
inner: Mutex<ProcessorInner>,
}
unsafe impl Sync for Processor {}
struct ProcessorInner {
current: Option<Arc<TaskControlBlock>>,
idle_task_cx_ptr: usize,
idle_task_cx: TaskContext,
}
impl Processor {
pub fn new() -> Self {
Self {
inner: Mutex::new(ProcessorInner {
current: None,
idle_task_cx_ptr: 0,
}),
current: None,
idle_task_cx: TaskContext::zero_init(),
}
}
fn get_idle_task_cx_ptr2(&self) -> *const usize {
let inner = self.inner.lock();
&inner.idle_task_cx_ptr as *const usize
fn get_idle_task_cx_ptr(&mut self) -> *mut TaskContext {
&mut self.idle_task_cx as *mut _
}
pub fn run(&self) {
loop {
if let Some(task) = fetch_task() {
let idle_task_cx_ptr = self.get_idle_task_cx_ptr2();
// acquire
let next_task_cx_ptr = task.acquire_inner_lock().get_task_cx_ptr2();
task.acquire_inner_lock().task_status = TaskStatus::Running;
// release
self.inner.lock().current = Some(task);
unsafe {
__switch(
idle_task_cx_ptr,
next_task_cx_ptr,
);
}
}
}
}
pub fn take_current(&self) -> Option<Arc<TaskControlBlock>> {
self.inner.lock().current.take()
pub fn take_current(&mut self) -> Option<Arc<TaskControlBlock>> {
self.current.take()
}
pub fn current(&self) -> Option<Arc<TaskControlBlock>> {
self.inner.lock().current.as_ref().map(|task| task.clone())
self.current.as_ref().map(Arc::clone)
}
}
lazy_static! {
pub static ref PROCESSOR: Processor = Processor::new();
pub static ref PROCESSOR: UPIntrFreeCell<Processor> =
unsafe { UPIntrFreeCell::new(Processor::new()) };
}
pub fn run_tasks() {
PROCESSOR.run();
loop {
let mut processor = PROCESSOR.exclusive_access();
if let Some(task) = fetch_task() {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
// access coming task TCB exclusively
let next_task_cx_ptr = task.inner.exclusive_session(|task_inner| {
task_inner.task_status = TaskStatus::Running;
&task_inner.task_cx as *const TaskContext
});
processor.current = Some(task);
// release processor manually
drop(processor);
unsafe {
__switch(idle_task_cx_ptr, next_task_cx_ptr);
}
} else {
println!("no tasks available in run_tasks");
}
}
}
pub fn take_current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.take_current()
PROCESSOR.exclusive_access().take_current()
}
pub fn current_task() -> Option<Arc<TaskControlBlock>> {
PROCESSOR.current()
PROCESSOR.exclusive_access().current()
}
pub fn current_process() -> Arc<ProcessControlBlock> {
current_task().unwrap().process.upgrade().unwrap()
}
pub fn current_user_token() -> usize {
let task = current_task().unwrap();
let token = task.acquire_inner_lock().get_user_token();
token
task.get_user_token()
}
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().acquire_inner_lock().get_trap_cx()
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
pub fn current_trap_cx_user_va() -> usize {
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.trap_cx_user_va()
}
pub fn current_kstack_top() -> usize {
current_task().unwrap().kstack.get_top()
}
pub fn schedule(switched_task_cx_ptr2: *const usize) {
let idle_task_cx_ptr2 = PROCESSOR.get_idle_task_cx_ptr2();
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr =
PROCESSOR.exclusive_session(|processor| processor.get_idle_task_cx_ptr());
unsafe {
__switch(
switched_task_cx_ptr2,
idle_task_cx_ptr2,
);
__switch(switched_task_cx_ptr, idle_task_cx_ptr);
}
}

@ -0,0 +1,29 @@
use bitflags::*;
bitflags! {
pub struct SignalFlags: u32 {
const SIGINT = 1 << 2;
const SIGILL = 1 << 4;
const SIGABRT = 1 << 6;
const SIGFPE = 1 << 8;
const SIGSEGV = 1 << 11;
}
}
impl SignalFlags {
pub fn check_error(&self) -> Option<(i32, &'static str)> {
if self.contains(Self::SIGINT) {
Some((-2, "Killed, SIGINT=2"))
} else if self.contains(Self::SIGILL) {
Some((-4, "Illegal Instruction, SIGILL=4"))
} else if self.contains(Self::SIGABRT) {
Some((-6, "Aborted, SIGABRT=6"))
} else if self.contains(Self::SIGFPE) {
Some((-8, "Erroneous Arithmetic Operation, SIGFPE=8"))
} else if self.contains(Self::SIGSEGV) {
Some((-11, "Segmentation Fault, SIGSEGV=11"))
} else {
None
}
}
}

@ -1,34 +1,34 @@
.altmacro
.macro SAVE_SN n
sd s\n, (\n+1)*8(sp)
sd s\n, (\n+2)*8(a0)
.endm
.macro LOAD_SN n
ld s\n, (\n+1)*8(sp)
ld s\n, (\n+2)*8(a1)
.endm
.section .text
.globl __switch
__switch:
# __switch(current_task_cx: &*const TaskContext, next_task_cx: &*const TaskContext)
# push TaskContext to current sp and save its address to where a0 points to
addi sp, sp, -13*8
sd sp, 0(a0)
# fill TaskContext with ra & s0-s11
sd ra, 0(sp)
# __switch(
# current_task_cx_ptr: *mut TaskContext,
# next_task_cx_ptr: *const TaskContext
# )
# save kernel stack of current task
sd sp, 8(a0)
# save ra & s0~s11 of current execution
sd ra, 0(a0)
.set n, 0
.rept 12
SAVE_SN %n
.set n, n + 1
.endr
# ready for loading TaskContext a1 points to
ld sp, 0(a1)
# load registers in the TaskContext
ld ra, 0(sp)
# restore ra & s0~s11 of next execution
ld ra, 0(a1)
.set n, 0
.rept 12
LOAD_SN %n
.set n, n + 1
.endr
# pop TaskContext
addi sp, sp, 13*8
# restore kernel stack of next task
ld sp, 8(a1)
ret

@ -1,5 +1,8 @@
use super::TaskContext;
use core::arch::global_asm;
global_asm!(include_str!("switch.S"));
extern "C" {
pub fn __switch(current_task_cx: *const usize, next_task_cx: *const usize);
pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext);
}
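The assembly above stores ra at offset 0, sp at offset 8 and s0..s11 at offsets 16 onwards of the TaskContext pointed to by a0/a1, so the Rust side has to be a #[repr(C)] struct with exactly that field order. A sketch of the layout the assembly assumes (the real definition lives in task/context.rs and is not part of this diff):

/// Layout assumed by __switch: offsets 0, 8, and 16 + 8*n.
#[repr(C)]
pub struct TaskContext {
    ra: usize,      // sd ra, 0(a0)  / ld ra, 0(a1)
    sp: usize,      // sd sp, 8(a0)  / ld sp, 8(a1)
    s: [usize; 12], // SAVE_SN/LOAD_SN place s_n at (n+2)*8
}

impl TaskContext {
    pub fn zero_init() -> Self {
        Self { ra: 0, sp: 0, s: [0; 12] }
    }
}

fn main() {
    // sanity-check the offsets the assembly relies on
    assert_eq!(core::mem::size_of::<TaskContext>(), 14 * 8);
    let cx = TaskContext::zero_init();
    let base = &cx as *const TaskContext as usize;
    assert_eq!(&cx.sp as *const usize as usize - base, 8);
    assert_eq!(&cx.s[0] as *const usize as usize - base, 16);
}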

@ -1,205 +1,80 @@
use crate::mm::{MemorySet, PhysPageNum, KERNEL_SPACE, VirtAddr};
use crate::trap::{TrapContext, trap_handler};
use crate::config::{TRAP_CONTEXT};
use super::TaskContext;
use super::{PidHandle, pid_alloc, KernelStack};
use alloc::sync::{Weak, Arc};
use alloc::vec;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
use crate::fs::{File, Stdin, Stdout};
use super::id::TaskUserRes;
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{
mm::PhysPageNum,
sync::{UPIntrFreeCell, UPIntrRefMut},
};
use alloc::sync::{Arc, Weak};
pub struct TaskControlBlock {
// immutable
pub pid: PidHandle,
pub kernel_stack: KernelStack,
pub process: Weak<ProcessControlBlock>,
pub kstack: KernelStack,
// mutable
inner: Mutex<TaskControlBlockInner>,
pub inner: UPIntrFreeCell<TaskControlBlockInner>,
}
impl TaskControlBlock {
pub fn inner_exclusive_access(&self) -> UPIntrRefMut<'_, TaskControlBlockInner> {
self.inner.exclusive_access()
}
pub fn get_user_token(&self) -> usize {
let process = self.process.upgrade().unwrap();
let inner = process.inner_exclusive_access();
inner.memory_set.token()
}
}
pub struct TaskControlBlockInner {
pub res: Option<TaskUserRes>,
pub trap_cx_ppn: PhysPageNum,
pub base_size: usize,
pub task_cx_ptr: usize,
pub task_cx: TaskContext,
pub task_status: TaskStatus,
pub memory_set: MemorySet,
pub parent: Option<Weak<TaskControlBlock>>,
pub children: Vec<Arc<TaskControlBlock>>,
pub exit_code: i32,
pub fd_table: Vec<Option<Arc<dyn File + Send + Sync>>>,
pub exit_code: Option<i32>,
}
impl TaskControlBlockInner {
pub fn get_task_cx_ptr2(&self) -> *const usize {
&self.task_cx_ptr as *const usize
}
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
pub fn get_user_token(&self) -> usize {
self.memory_set.token()
}
#[allow(unused)]
fn get_status(&self) -> TaskStatus {
self.task_status
}
pub fn is_zombie(&self) -> bool {
self.get_status() == TaskStatus::Zombie
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
self.fd_table.len() - 1
}
}
}
impl TaskControlBlock {
pub fn acquire_inner_lock(&self) -> MutexGuard<TaskControlBlockInner> {
self.inner.lock()
}
pub fn new(elf_data: &[u8]) -> Self {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
let task_status = TaskStatus::Ready;
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
// push a task context which goes to trap_return to the top of kernel stack
let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
let task_control_block = Self {
pid: pid_handle,
kernel_stack,
inner: Mutex::new(TaskControlBlockInner {
trap_cx_ppn,
base_size: user_sp,
task_cx_ptr: task_cx_ptr as usize,
task_status,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
}),
};
// prepare TrapContext in user space
// ---- acquire child PCB lock
let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
// ---- release child PCB lock
*trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.lock().token(),
kernel_stack_top,
trap_handler as usize,
);
task_control_block
}
pub fn exec(&self, elf_data: &[u8]) {
// memory_set with elf program headers/trampoline/trap context/user stack
let (memory_set, user_sp, entry_point) = MemorySet::from_elf(elf_data);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
// **** hold current PCB lock
let mut inner = self.inner.lock();
// substitute memory_set
inner.memory_set = memory_set;
// update trap_cx ppn
inner.trap_cx_ppn = trap_cx_ppn;
drop(inner);
// **** release current PCB lock manually
// initialize trap_cx
// **** acquire current PCB lock
let trap_cx = self.acquire_inner_lock().get_trap_cx();
// **** release current PCB lock
*trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
KERNEL_SPACE.lock().token(),
self.kernel_stack.get_top(),
trap_handler as usize,
);
}
pub fn fork(self: &Arc<TaskControlBlock>) -> Arc<TaskControlBlock> {
// ---- hold parent PCB lock
let mut parent_inner = self.inner.lock();
// copy user space(include trap context)
let memory_set = MemorySet::from_existed_user(
&parent_inner.memory_set
);
let trap_cx_ppn = memory_set
.translate(VirtAddr::from(TRAP_CONTEXT).into())
.unwrap()
.ppn();
let task_status = TaskStatus::Ready;
// alloc a pid and a kernel stack in kernel space
let pid_handle = pid_alloc();
let kernel_stack = KernelStack::new(&pid_handle);
let kernel_stack_top = kernel_stack.get_top();
// push a goto_trap_return task_cx on the top of kernel stack
let task_cx_ptr = kernel_stack.push_on_top(TaskContext::goto_trap_return());
// copy fd table
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent_inner.fd_table.iter() {
if let Some(file) = fd {
new_fd_table.push(Some(file.clone()));
} else {
new_fd_table.push(None);
}
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
let kstack = kstack_alloc();
let kstack_top = kstack.get_top();
Self {
process: Arc::downgrade(&process),
kstack,
inner: unsafe {
UPIntrFreeCell::new(TaskControlBlockInner {
res: Some(res),
trap_cx_ppn,
task_cx: TaskContext::goto_trap_return(kstack_top),
task_status: TaskStatus::Ready,
exit_code: None,
})
},
}
let task_control_block = Arc::new(TaskControlBlock {
pid: pid_handle,
kernel_stack,
inner: Mutex::new(TaskControlBlockInner {
trap_cx_ppn,
base_size: parent_inner.base_size,
task_cx_ptr: task_cx_ptr as usize,
task_status,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
}),
});
// add child
parent_inner.children.push(task_control_block.clone());
// modify kernel_sp in trap_cx
// **** acquire child PCB lock
let trap_cx = task_control_block.acquire_inner_lock().get_trap_cx();
// **** release child PCB lock
trap_cx.kernel_sp = kernel_stack_top;
// return
task_control_block
// ---- release parent PCB lock
}
pub fn getpid(&self) -> usize {
self.pid.0
}
}
#[derive(Copy, Clone, PartialEq)]
pub enum TaskStatus {
Ready,
Running,
Zombie,
}
Blocking,
}

@ -1,6 +1,13 @@
use riscv::register::time;
use core::cmp::Ordering;
use crate::config::CLOCK_FREQ;
use crate::sbi::set_timer;
use crate::config::CPU_FREQ;
use crate::sync::UPIntrFreeCell;
use crate::task::{add_task, TaskControlBlock};
use alloc::collections::BinaryHeap;
use alloc::sync::Arc;
use lazy_static::*;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
@ -10,9 +17,57 @@ pub fn get_time() -> usize {
}
pub fn get_time_ms() -> usize {
time::read() / (CPU_FREQ / MSEC_PER_SEC)
time::read() / (CLOCK_FREQ / MSEC_PER_SEC)
}
pub fn set_next_trigger() {
set_timer(get_time() + CPU_FREQ / TICKS_PER_SEC);
}
set_timer(get_time() + CLOCK_FREQ / TICKS_PER_SEC);
}
pub struct TimerCondVar {
pub expire_ms: usize,
pub task: Arc<TaskControlBlock>,
}
impl PartialEq for TimerCondVar {
fn eq(&self, other: &Self) -> bool {
self.expire_ms == other.expire_ms
}
}
impl Eq for TimerCondVar {}
impl PartialOrd for TimerCondVar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let a = -(self.expire_ms as isize);
let b = -(other.expire_ms as isize);
Some(a.cmp(&b))
}
}
impl Ord for TimerCondVar {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
lazy_static! {
static ref TIMERS: UPIntrFreeCell<BinaryHeap<TimerCondVar>> =
unsafe { UPIntrFreeCell::new(BinaryHeap::<TimerCondVar>::new()) };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
let mut timers = TIMERS.exclusive_access();
timers.push(TimerCondVar { expire_ms, task });
}
pub fn check_timer() {
let current_ms = get_time_ms();
let mut timers = TIMERS.exclusive_access();
while let Some(timer) = timers.peek() {
if timer.expire_ms <= current_ms {
add_task(Arc::clone(&timer.task));
timers.pop();
} else {
break;
}
}
}
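BinaryHeap is a max-heap, so TimerCondVar above compares on the negated expire_ms to make peek() return the earliest deadline; core::cmp::Reverse is the usual shorthand for the same trick. A small standalone illustration of that ordering, independent of the kernel types:

use std::cmp::{Ordering, Reverse};
use std::collections::BinaryHeap;

#[derive(Eq, PartialEq)]
struct Deadline(usize); // expire time in ms

impl Ord for Deadline {
    fn cmp(&self, other: &Self) -> Ordering {
        // reversed comparison: a smaller expire time ranks "greater"
        other.0.cmp(&self.0)
    }
}
impl PartialOrd for Deadline {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut timers = BinaryHeap::new();
    for ms in [300usize, 100, 200] {
        timers.push(Deadline(ms));
    }
    assert_eq!(timers.peek().unwrap().0, 100); // earliest deadline on top

    // Equivalent with Reverse, no manual Ord needed:
    let alt: BinaryHeap<Reverse<usize>> = [300, 100, 200].into_iter().map(Reverse).collect();
    assert_eq!(alt.peek().unwrap().0, 100);
}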

@ -1,6 +1,7 @@
use riscv::register::sstatus::{Sstatus, self, SPP};
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
pub struct TrapContext {
pub x: [usize; 32],
pub sstatus: Sstatus,
@ -11,7 +12,9 @@ pub struct TrapContext {
}
impl TrapContext {
pub fn set_sp(&mut self, sp: usize) { self.x[2] = sp; }
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
pub fn app_init_context(
entry: usize,
sp: usize,
@ -22,8 +25,6 @@ impl TrapContext {
let mut sstatus = sstatus::read();
// set CPU privilege to User after trapping back
sstatus.set_spp(SPP::User);
// enable Supervisor mode interrupt after trapping back
sstatus.set_spie(true);
let mut cx = Self {
x: [0; 32],
sstatus,

@ -1,27 +1,18 @@
mod context;
use riscv::register::{
mtvec::TrapMode,
stvec,
scause::{
self,
Trap,
Exception,
Interrupt,
},
stval,
sstatus,
sie,
};
use crate::config::TRAMPOLINE;
use crate::syscall::syscall;
use crate::task::{
exit_current_and_run_next,
suspend_current_and_run_next,
current_user_token,
current_trap_cx,
check_signals_of_current, current_add_signal, current_trap_cx, current_trap_cx_user_va,
current_user_token, exit_current_and_run_next, suspend_current_and_run_next, SignalFlags,
};
use crate::timer::{check_timer, set_next_trigger};
use core::arch::{asm, global_asm};
use riscv::register::{
mtvec::TrapMode,
scause::{self, Exception, Interrupt, Trap},
sie, sscratch, sstatus, stval, stvec,
};
use crate::timer::set_next_trigger;
use crate::config::{TRAP_CONTEXT, TRAMPOLINE};
global_asm!(include_str!("trap.S"));
@ -30,8 +21,14 @@ pub fn init() {
}
fn set_kernel_trap_entry() {
extern "C" {
fn __alltraps();
fn __alltraps_k();
}
let __alltraps_k_va = __alltraps_k as usize - __alltraps as usize + TRAMPOLINE;
unsafe {
stvec::write(trap_from_kernel as usize, TrapMode::Direct);
stvec::write(__alltraps_k_va, TrapMode::Direct);
sscratch::write(trap_from_kernel as usize);
}
}
@ -41,13 +38,22 @@ fn set_user_trap_entry() {
}
}
#[allow(unused)]
pub fn enable_interrupt() {
unsafe { sstatus::set_sie(); }
pub fn enable_timer_interrupt() {
unsafe {
sie::set_stimer();
}
}
pub fn enable_timer_interrupt() {
unsafe { sie::set_stimer(); }
fn enable_supervisor_interrupt() {
unsafe {
sstatus::set_sie();
}
}
fn disable_supervisor_interrupt() {
unsafe {
sstatus::clear_sie();
}
}
#[no_mangle]
@ -55,68 +61,109 @@ pub fn trap_handler() -> ! {
set_kernel_trap_entry();
let scause = scause::read();
let stval = stval::read();
//println!("into {:?}", scause.cause());
match scause.cause() {
Trap::Exception(Exception::UserEnvCall) => {
// jump to next instruction anyway
let mut cx = current_trap_cx();
cx.sepc += 4;
enable_supervisor_interrupt();
// get system call return value
let result = syscall(cx.x[17], [cx.x[10], cx.x[11], cx.x[12]]);
// cx is changed during sys_exec, so we have to call it again
cx = current_trap_cx();
cx.x[10] = result as usize;
}
Trap::Exception(Exception::StoreFault) |
Trap::Exception(Exception::StorePageFault) |
Trap::Exception(Exception::InstructionFault) |
Trap::Exception(Exception::InstructionPageFault) |
Trap::Exception(Exception::LoadFault) |
Trap::Exception(Exception::LoadPageFault) => {
Trap::Exception(Exception::StoreFault)
| Trap::Exception(Exception::StorePageFault)
| Trap::Exception(Exception::InstructionFault)
| Trap::Exception(Exception::InstructionPageFault)
| Trap::Exception(Exception::LoadFault)
| Trap::Exception(Exception::LoadPageFault) => {
/*
println!(
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, core dumped.",
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, kernel killed it.",
scause.cause(),
stval,
current_trap_cx().sepc,
);
// page fault exit code
exit_current_and_run_next(-2);
*/
current_add_signal(SignalFlags::SIGSEGV);
}
Trap::Exception(Exception::IllegalInstruction) => {
println!("[kernel] IllegalInstruction in application, core dumped.");
// illegal instruction exit code
exit_current_and_run_next(-3);
current_add_signal(SignalFlags::SIGILL);
}
Trap::Interrupt(Interrupt::SupervisorTimer) => {
set_next_trigger();
check_timer();
suspend_current_and_run_next();
}
Trap::Interrupt(Interrupt::SupervisorExternal) => {
crate::board::irq_handler();
}
_ => {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
panic!(
"Unsupported trap {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
// check signals
if let Some((errno, msg)) = check_signals_of_current() {
println!("[kernel] {}", msg);
exit_current_and_run_next(errno);
}
trap_return();
}
#[no_mangle]
pub fn trap_return() -> ! {
disable_supervisor_interrupt();
set_user_trap_entry();
let trap_cx_ptr = TRAP_CONTEXT;
let trap_cx_user_va = current_trap_cx_user_va();
let user_satp = current_user_token();
extern "C" {
fn __alltraps();
fn __restore();
}
let restore_va = __restore as usize - __alltraps as usize + TRAMPOLINE;
//println!("before return");
unsafe {
llvm_asm!("fence.i" :::: "volatile");
llvm_asm!("jr $0" :: "r"(restore_va), "{a0}"(trap_cx_ptr), "{a1}"(user_satp) :: "volatile");
asm!(
"fence.i",
"jr {restore_va}",
restore_va = in(reg) restore_va,
in("a0") trap_cx_user_va,
in("a1") user_satp,
options(noreturn)
);
}
panic!("Unreachable in back_to_user!");
}
#[no_mangle]
pub fn trap_from_kernel() -> ! {
panic!("a trap {:?} from kernel!", scause::read().cause());
pub fn trap_from_kernel(_trap_cx: &TrapContext) {
let scause = scause::read();
let stval = stval::read();
match scause.cause() {
Trap::Interrupt(Interrupt::SupervisorExternal) => {
crate::board::irq_handler();
}
Trap::Interrupt(Interrupt::SupervisorTimer) => {
set_next_trigger();
check_timer();
// do not schedule now
}
_ => {
panic!(
"Unsupported trap from kernel: {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
}
pub use context::{TrapContext};
pub use context::TrapContext;
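Both set_kernel_trap_entry and trap_return above translate a link-time symbol into its alias inside the trampoline page with the same formula: offset from __alltraps plus TRAMPOLINE, the fixed virtual page where trap.S is mapped in every address space. A tiny worked example with made-up addresses:

// Hypothetical link-time addresses of the trampoline symbols.
const ALLTRAPS: usize = 0x8020_0000; // __alltraps, start of .text.trampoline
const RESTORE: usize = 0x8020_0042; // __restore, some bytes further in
const TRAMPOLINE: usize = 0xffff_ffff_ffff_f000; // illustrative highest page

fn trampoline_va(symbol: usize) -> usize {
    // same formula as restore_va / __alltraps_k_va in trap/mod.rs
    symbol - ALLTRAPS + TRAMPOLINE
}

fn main() {
    assert_eq!(trampoline_va(ALLTRAPS), TRAMPOLINE);
    assert_eq!(trampoline_va(RESTORE), TRAMPOLINE + 0x42);
    println!("__restore alias = {:#x}", trampoline_va(RESTORE));
}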

@ -8,6 +8,8 @@
.section .text.trampoline
.globl __alltraps
.globl __restore
.globl __alltraps_k
.globl __restore_k
.align 2
__alltraps:
csrrw sp, sscratch, sp
@ -67,3 +69,36 @@ __restore:
# back to user stack
ld sp, 2*8(sp)
sret
.align 2
__alltraps_k:
addi sp, sp, -34*8
sd x1, 1*8(sp)
sd x3, 3*8(sp)
.set n, 5
.rept 27
SAVE_GP %n
.set n, n+1
.endr
csrr t0, sstatus
csrr t1, sepc
sd t0, 32*8(sp)
sd t1, 33*8(sp)
mv a0, sp
csrr t2, sscratch
jalr t2
__restore_k:
ld t0, 32*8(sp)
ld t1, 33*8(sp)
csrw sstatus, t0
csrw sepc, t1
ld x1, 1*8(sp)
ld x3, 3*8(sp)
.set n, 5
.rept 27
LOAD_GP %n
.set n, n+1
.endr
addi sp, sp, 34*8
sret

@ -1 +1 @@
nightly-2020-11-01
nightly-2022-04-11

@ -0,0 +1,2 @@
export PATH=$(rustc --print sysroot)/bin:$PATH
export RUST_SRC_PATH=$(rustc --print sysroot)/lib/rustlib/src/rust/library/

File diff suppressed because one or more lines are too long

@ -7,4 +7,9 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
buddy_system_allocator = "0.6"
buddy_system_allocator = "0.6"
bitflags = "1.2.1"
riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
[profile.release]
debug = true

@ -8,9 +8,15 @@ BINS := $(patsubst $(APP_DIR)/%.rs, $(TARGET_DIR)/%.bin, $(APPS))
OBJDUMP := rust-objdump --arch-name=riscv64
OBJCOPY := rust-objcopy --binary-architecture=riscv64
CP := cp
TEST ?=
elf: $(APPS)
@cargo build --release
ifeq ($(TEST), 1)
@$(CP) $(TARGET_DIR)/usertests $(TARGET_DIR)/initproc
endif
binary: elf
$(foreach elf, $(ELFS), $(OBJCOPY) $(elf) --strip-all -O binary $(patsubst $(TARGET_DIR)/%, $(TARGET_DIR)/%.bin, $(elf));)
@ -20,4 +26,4 @@ build: binary
clean:
@cargo clean
.PHONY: elf binary build clean
.PHONY: elf binary build clean

@ -0,0 +1,32 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use user_lib::{close, open, read, OpenFlags};
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
println!("argc = {}", argc);
for (i, arg) in argv.iter().enumerate() {
println!("argv[{}] = {}", i, arg);
}
assert!(argc == 2);
let fd = open(argv[1], OpenFlags::RDONLY);
if fd == -1 {
panic!("Error occurred when opening file");
}
let fd = fd as usize;
let mut buf = [0u8; 256];
loop {
let size = read(fd, &mut buf) as usize;
if size == 0 {
break;
}
print!("{}", core::str::from_utf8(&buf[..size]).unwrap());
}
close(fd);
0
}

@ -0,0 +1,16 @@
#![no_std]
#![no_main]
extern crate alloc;
#[macro_use]
extern crate user_lib;
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
println!("argc = {}", argc);
for (i, arg) in argv.iter().enumerate() {
println!("argv[{}] = {}", i, arg);
}
0
}

@ -0,0 +1,30 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::read;
#[no_mangle]
pub fn main(_argc: usize, _argv: &[&str]) -> i32 {
let mut buf = [0u8; 256];
let mut lines = 0usize;
let mut total_size = 0usize;
loop {
let len = read(0, &mut buf) as usize;
if len == 0 {
break;
}
total_size += len;
let string = core::str::from_utf8(&buf[..len]).unwrap();
lines += string
.chars()
.fold(0, |acc, c| acc + if c == '\n' { 1 } else { 0 });
}
if total_size > 0 {
lines += 1;
}
println!("{}", lines);
0
}

@ -0,0 +1,138 @@
#![no_std]
#![no_main]
#![feature(core_intrinsics)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
extern crate core;
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use user_lib::{exit, sleep, thread_create, waittid};
const N: usize = 2;
const THREAD_NUM: usize = 10;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FlagState {
Out,
Want,
In,
}
static mut TURN: usize = 0;
static mut FLAG: [FlagState; THREAD_NUM] = [FlagState::Out; THREAD_NUM];
static GUARD: AtomicUsize = AtomicUsize::new(0);
fn critical_test_enter() {
assert_eq!(GUARD.fetch_add(1, Ordering::SeqCst), 0);
}
fn critical_test_claim() {
assert_eq!(GUARD.load(Ordering::SeqCst), 1);
}
fn critical_test_exit() {
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
}
fn eisenberg_enter_critical(id: usize) {
/* announce that we want to enter */
loop {
println!("Thread[{}] try enter", id);
vstore!(&FLAG[id], FlagState::Want);
loop {
/* check if any thread with higher priority is `Want` or `In` */
let mut prior_thread: Option<usize> = None;
let turn = vload!(&TURN);
let ring_id = if id < turn { id + THREAD_NUM } else { id };
// FLAG.iter() may lead to some errors, use for-loop instead
for i in turn..ring_id {
if vload!(&FLAG[i % THREAD_NUM]) != FlagState::Out {
prior_thread = Some(i % THREAD_NUM);
break;
}
}
if prior_thread.is_none() {
break;
}
println!(
"Thread[{}]: prior thread {} exist, sleep and retry",
id,
prior_thread.unwrap()
);
sleep(1);
}
/* now tentatively claim the resource */
vstore!(&FLAG[id], FlagState::In);
/* enforce the order of `claim` and `conflict check`*/
memory_fence!();
/* check if another thread is also `In`, which implies a conflict */
let mut conflict = false;
for i in 0..THREAD_NUM {
if i != id && vload!(&FLAG[i]) == FlagState::In {
conflict = true;
}
}
if !conflict {
break;
}
println!("Thread[{}]: CONFLECT!", id);
/* no need to sleep */
}
/* claim the turn */
vstore!(&TURN, id);
println!("Thread[{}] enter", id);
}
fn eisenberg_exit_critical(id: usize) {
/* find next one who wants to enter and give the turn to it*/
let mut next = id;
let ring_id = id + THREAD_NUM;
for i in (id + 1)..ring_id {
let idx = i % THREAD_NUM;
if vload!(&FLAG[idx]) == FlagState::Want {
next = idx;
break;
}
}
vstore!(&TURN, next);
/* All done */
vstore!(&FLAG[id], FlagState::Out);
println!("Thread[{}] exit, give turn to {}", id, next);
}
pub fn thread_fn(id: usize) -> ! {
println!("Thread[{}] init.", id);
for _ in 0..N {
eisenberg_enter_critical(id);
critical_test_enter();
for _ in 0..3 {
critical_test_claim();
sleep(2);
}
critical_test_exit();
eisenberg_exit_critical(id);
}
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
let mut v = Vec::new();
// TODO: really shuffle
assert_eq!(THREAD_NUM, 10);
let shuffle: [usize; 10] = [0, 7, 4, 6, 2, 9, 8, 1, 3, 5];
for i in 0..THREAD_NUM {
v.push(thread_create(thread_fn as usize, shuffle[i]));
}
for tid in v.iter() {
let exit_code = waittid(*tid as usize);
assert_eq!(exit_code, 0, "thread conflict happened!");
println!("thread#{} exited with code {}", tid, exit_code);
}
println!("main thread exited.");
0
}

@ -3,7 +3,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, yield_, waitpid, exit, wait};
use user_lib::{exit, fork, wait, waitpid, yield_};
const MAGIC: i32 = -0x10384;
@ -13,7 +13,9 @@ pub fn main() -> i32 {
let pid = fork();
if pid == 0 {
println!("I am the child.");
for _ in 0..7 { yield_(); }
for _ in 0..7 {
yield_();
}
exit(MAGIC);
} else {
println!("I am parent, fork a child pid {}", pid);
@ -26,4 +28,3 @@ pub fn main() -> i32 {
println!("exit pass.");
0
}

@ -41,4 +41,4 @@ pub fn main() -> i32 {
println!("{}", color_text!(text, i));
}
0
}
}

@ -0,0 +1,29 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::{close, open, read, write, OpenFlags};
#[no_mangle]
pub fn main() -> i32 {
let test_str = "Hello, world!";
let filea = "filea\0";
let fd = open(filea, OpenFlags::CREATE | OpenFlags::WRONLY);
assert!(fd > 0);
let fd = fd as usize;
write(fd, test_str.as_bytes());
close(fd);
let fd = open(filea, OpenFlags::RDONLY);
assert!(fd > 0);
let fd = fd as usize;
let mut buffer = [0u8; 100];
let read_len = read(fd, &mut buffer) as usize;
close(fd);
assert_eq!(test_str, core::str::from_utf8(&buffer[..read_len]).unwrap(),);
println!("file_test passed!");
0
}

@ -4,9 +4,9 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, exit};
use user_lib::{exit, fork, wait};
const MAX_CHILD: usize = 40;
const MAX_CHILD: usize = 30;
#[no_mangle]
pub fn main() -> i32 {
@ -31,4 +31,4 @@ pub fn main() -> i32 {
}
println!("forktest pass.");
0
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, getpid, exit, sleep, get_time};
use user_lib::{exit, fork, get_time, getpid, sleep, wait};
static NUM: usize = 30;
@ -14,7 +14,8 @@ pub fn main() -> i32 {
let pid = fork();
if pid == 0 {
let current_time = get_time();
let sleep_length = (current_time as i32 as isize) * (current_time as i32 as isize) % 1000 + 1000;
let sleep_length =
(current_time as i32 as isize) * (current_time as i32 as isize) % 1000 + 1000;
println!("pid {} sleep for {} ms", getpid(), sleep_length);
sleep(sleep_length as usize);
println!("pid {} OK!", getpid());
@ -22,12 +23,12 @@ pub fn main() -> i32 {
}
}
let mut xstate: i32 = 0;
let mut exit_code: i32 = 0;
for _ in 0..NUM {
assert!(wait(&mut xstate) > 0);
assert_eq!(xstate, 0);
assert!(wait(&mut exit_code) > 0);
assert_eq!(exit_code, 0);
}
assert!(wait(&mut xstate) < 0);
assert!(wait(&mut exit_code) < 0);
println!("forktest2 test passed!");
0
}
}

@ -18,11 +18,11 @@ pub fn main() -> i32 {
100
} else {
// parent process
let mut xstate: i32 = 0;
let mut exit_code: i32 = 0;
println!("ready waiting on parent process!");
assert_eq!(pid, wait(&mut xstate));
assert_eq!(xstate, 100);
println!("child process pid = {}, exit code = {}", pid, xstate);
assert_eq!(pid, wait(&mut exit_code));
assert_eq!(exit_code, 100);
println!("child process pid = {}, exit code = {}", pid, exit_code);
0
}
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{sleep, getpid, fork, exit, yield_};
use user_lib::{exit, fork, getpid, sleep, wait, yield_};
const DEPTH: usize = 4;
@ -14,7 +14,7 @@ fn fork_child(cur: &str, branch: char) {
if l >= DEPTH {
return;
}
&mut next[..l].copy_from_slice(cur.as_bytes());
next[..l].copy_from_slice(cur.as_bytes());
next[l] = branch as u8;
if fork() == 0 {
fork_tree(core::str::from_utf8(&next[..l + 1]).unwrap());
@ -27,11 +27,19 @@ fn fork_tree(cur: &str) {
println!("pid{}: {}", getpid(), cur);
fork_child(cur, '0');
fork_child(cur, '1');
let mut exit_code: i32 = 0;
for _ in 0..2 {
wait(&mut exit_code);
}
}
#[no_mangle]
pub fn main() -> i32 {
fork_tree("");
let mut exit_code: i32 = 0;
for _ in 0..2 {
wait(&mut exit_code);
}
sleep(3000);
0
}

@ -8,4 +8,4 @@ extern crate user_lib;
pub fn main() -> i32 {
println!("Hello world from user mode program!");
0
}
}

@ -0,0 +1,33 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::{close, get_time, open, write, OpenFlags};
#[no_mangle]
pub fn main() -> i32 {
let mut buffer = [0u8; 1024]; // 1KiB
for (i, ch) in buffer.iter_mut().enumerate() {
*ch = i as u8;
}
let f = open("testf\0", OpenFlags::CREATE | OpenFlags::WRONLY);
if f < 0 {
panic!("Open test file failed!");
}
let f = f as usize;
let start = get_time();
let size_mb = 1usize;
for _ in 0..1024 * size_mb {
write(f, &buffer);
}
close(f);
let time_ms = (get_time() - start) as usize;
let speed_kbs = (size_mb << 20) / time_ms;
println!(
"{}MiB written, time cost = {}ms, write speed = {}KiB/s",
size_mb, time_ms, speed_kbs
);
0
}

@ -0,0 +1,56 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::{fmt::format, string::String, vec::Vec};
use user_lib::{close, get_time, gettid, open, write, OpenFlags};
use user_lib::{exit, thread_create, waittid};
fn worker(size_kib: usize) {
let mut buffer = [0u8; 1024]; // 1KiB
for (i, ch) in buffer.iter_mut().enumerate() {
*ch = i as u8;
}
let filename = format(format_args!("testf{}\0", gettid()));
let f = open(filename.as_str(), OpenFlags::CREATE | OpenFlags::WRONLY);
if f < 0 {
panic!("Open test file failed!");
}
let f = f as usize;
for _ in 0..size_kib {
write(f, &buffer);
}
close(f);
exit(0)
}
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
assert_eq!(argc, 2, "wrong argument");
let size_mb = 1usize;
let size_kb = size_mb << 10;
let workers = argv[1].parse::<usize>().expect("wrong argument");
assert!(workers >= 1 && size_kb % workers == 0, "wrong argument");
let start = get_time();
let mut v = Vec::new();
let size_mb = 1usize;
for _ in 0..workers {
v.push(thread_create(worker as usize, size_kb / workers));
}
for tid in v.iter() {
assert_eq!(0, waittid(*tid as usize));
}
let time_ms = (get_time() - start) as usize;
let speed_kbs = size_kb * 1000 / time_ms;
println!(
"{}MiB written by {} threads, time cost = {}ms, write speed = {}KiB/s",
size_mb, workers, time_ms, speed_kbs
);
0
}

@ -0,0 +1,10 @@
#![no_std]
#![no_main]
#![allow(clippy::empty_loop)]
extern crate user_lib;
#[no_mangle]
pub fn main(_argc: usize, _argv: &[&str]) -> ! {
loop {}
}

@ -1,20 +1,14 @@
#![no_std]
#![no_main]
#[macro_use]
extern crate user_lib;
use user_lib::{
fork,
wait,
exec,
yield_,
};
use user_lib::{exec, fork, wait, yield_};
#[no_mangle]
fn main() -> i32 {
if fork() == 0 {
exec("user_shell\0");
exec("user_shell\0", &[core::ptr::null::<u8>()]);
} else {
loop {
let mut exit_code: i32 = 0;
@ -23,12 +17,14 @@ fn main() -> i32 {
yield_();
continue;
}
/*
println!(
"[initproc] Released a zombie process, pid={}, exit_code={}",
pid,
exit_code,
);
*/
}
}
0
}
}

@ -1,12 +1,13 @@
#![no_std]
#![no_main]
#![allow(clippy::needless_range_loop)]
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, yield_, exit, getpid, get_time};
use user_lib::{exit, fork, get_time, getpid, wait, yield_};
static NUM: usize = 35;
static NUM: usize = 30;
const N: usize = 10;
static P: i32 = 10007;
type Arr = [[i32; N]; N];
@ -56,13 +57,13 @@ pub fn main() -> i32 {
println!("fork ok.");
let mut xstate: i32 = 0;
let mut exit_code: i32 = 0;
for _ in 0..NUM {
if wait(&mut xstate) < 0 {
if wait(&mut exit_code) < 0 {
panic!("wait failed.");
}
}
assert!(wait(&mut xstate) < 0);
assert!(wait(&mut exit_code) < 0);
println!("matrix passed.");
0
}
}

@ -0,0 +1,73 @@
#![no_std]
#![no_main]
#![allow(clippy::println_empty_string)]
#[macro_use]
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::exit;
use user_lib::{semaphore_create, semaphore_down, semaphore_up};
use user_lib::{thread_create, waittid};
const SEM_MUTEX: usize = 0;
const SEM_EMPTY: usize = 1;
const SEM_EXISTED: usize = 2;
const BUFFER_SIZE: usize = 8;
static mut BUFFER: [usize; BUFFER_SIZE] = [0; BUFFER_SIZE];
static mut FRONT: usize = 0;
static mut TAIL: usize = 0;
const PRODUCER_COUNT: usize = 4;
const NUMBER_PER_PRODUCER: usize = 100;
unsafe fn producer(id: *const usize) -> ! {
let id = *id;
for _ in 0..NUMBER_PER_PRODUCER {
semaphore_down(SEM_EMPTY);
semaphore_down(SEM_MUTEX);
BUFFER[FRONT] = id;
FRONT = (FRONT + 1) % BUFFER_SIZE;
semaphore_up(SEM_MUTEX);
semaphore_up(SEM_EXISTED);
}
exit(0)
}
unsafe fn consumer() -> ! {
for _ in 0..PRODUCER_COUNT * NUMBER_PER_PRODUCER {
semaphore_down(SEM_EXISTED);
semaphore_down(SEM_MUTEX);
print!("{} ", BUFFER[TAIL]);
TAIL = (TAIL + 1) % BUFFER_SIZE;
semaphore_up(SEM_MUTEX);
semaphore_up(SEM_EMPTY);
}
println!("");
exit(0)
}
#[no_mangle]
pub fn main() -> i32 {
// create semaphores
assert_eq!(semaphore_create(1) as usize, SEM_MUTEX);
assert_eq!(semaphore_create(BUFFER_SIZE) as usize, SEM_EMPTY);
assert_eq!(semaphore_create(0) as usize, SEM_EXISTED);
// create threads
let ids: Vec<_> = (0..PRODUCER_COUNT).collect();
let mut threads = Vec::new();
for i in 0..PRODUCER_COUNT {
threads.push(thread_create(
producer as usize,
&ids.as_slice()[i] as *const _ as usize,
));
}
threads.push(thread_create(consumer as usize, 0));
// wait for all threads to complete
for thread in threads.iter() {
waittid(*thread as usize);
}
println!("mpsc_sem passed!");
0
}

Some files were not shown because too many files have changed in this diff
