Compare commits

...

20 Commits

@ -6,3 +6,5 @@ docker:
build_docker:
docker build -t ${DOCKER_NAME} .
fmt:
	cd easy-fs; cargo fmt; cd ../easy-fs-fuse; cargo fmt; cd ../os ; cargo fmt; cd ../user; cargo fmt; cd ..

@ -3,6 +3,8 @@ rCore-Tutorial version 3.5. See the [Documentation in Chinese](https://rcore-os.
rCore-Tutorial API Docs. See the [API Docs of Ten OSes](#OS-API-DOCS)
If you don't know Rust Language and try to learn it, please visit [Rust Learning Resources](https://github.com/rcore-os/rCore/wiki/study-resource-of-system-programming-in-RUST)
Official QQ group number: 735045051
## news
@ -226,6 +228,10 @@ $ make run
[KERN] mm::frame_allocator::lazy_static!FRAME_ALLOCATOR begin
......
```
## Rustdoc
Currently it can only help you view the code since only a tiny part of the code has been documented.

@ -9,6 +9,3 @@ edition = "2018"
[dependencies]
spin = "0.7.0"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
[profile.release]
debug = true

@ -14,14 +14,12 @@ bitflags = "1.2.1"
xmas-elf = "0.7.0"
volatile = "0.3"
virtio-drivers = { git = "https://github.com/rcore-os/virtio-drivers" }
k210-pac = { git = "https://github.com/wyfcyx/k210-pac" }
k210-hal = { git = "https://github.com/wyfcyx/k210-hal" }
k210-soc = { git = "https://github.com/wyfcyx/k210-soc" }
easy-fs = { path = "../easy-fs" }
# k210-pac = { git = "https://github.com/wyfcyx/k210-pac" }
# k210-hal = { git = "https://github.com/wyfcyx/k210-hal" }
# k210-soc = { git = "https://github.com/wyfcyx/k210-soc" }
# easy-fs = { path = "../easy-fs" }
#log = "0.4"
spin = "0.7.0"
[features]
board_qemu = []
board_k210 = []
[profile.release]
debug = true
board_k210 = []

@ -73,7 +73,8 @@ $(APPS):
kernel:
@echo Platform: $(BOARD)
@cp src/linker-$(BOARD).ld src/linker.ld
@cargo build --release --features "board_$(BOARD)"
# @cargo build --release --features "board_$(BOARD)"
@cargo build --release
@rm src/linker.ld
clean:

@ -0,0 +1,50 @@
mod qemu;
pub use qemu::*;
pub const CLOCK_FREQ: usize = 12500000;
pub const MMIO: &[(usize, usize)] = &[
(0x1000_0000, 0x1000),
(0x1000_1000, 0x1000),
(0xC00_0000, 0x40_0000),
];
pub type BlockDeviceImpl = crate::drivers::block::VirtIOBlock;
pub type CharDeviceImpl = crate::drivers::chardev::NS16550a<VIRT_UART>;
pub const VIRT_PLIC: usize = 0xC00_0000;
pub const VIRT_UART: usize = 0x1000_0000;
use crate::drivers::block::BLOCK_DEVICE;
use crate::drivers::chardev::{CharDevice, UART};
use crate::drivers::plic::{IntrTargetPriority, PLIC};
/// One-time device setup for the QEMU `virt` board.
///
/// Order matters here: PLIC thresholds are programmed before individual
/// interrupt sources are enabled, and S-mode external interrupts
/// (`sie.SEIE`) are turned on only after the PLIC is fully configured.
pub fn device_init() {
    use riscv::register::sie;
    kprintln!("[KERN] board::qemu::device_init() begin");
    // SAFETY: VIRT_PLIC is the PLIC MMIO base on the QEMU virt machine
    // (also listed in the MMIO map above) — TODO confirm mapping is active.
    let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
    let hart_id: usize = 0;
    let supervisor = IntrTargetPriority::Supervisor;
    let machine = IntrTargetPriority::Machine;
    // Threshold 0: every enabled source (priority > 0) reaches S-mode.
    // Threshold 1: priority-1 sources are masked from M-mode.
    plic.set_threshold(hart_id, supervisor, 0);
    plic.set_threshold(hart_id, machine, 1);
    // Sources 1 and 10 are the ones irq_handler() dispatches to the
    // block device and the UART respectively.
    for intr_src_id in [1usize, 10] {
        plic.enable(hart_id, supervisor, intr_src_id);
        plic.set_priority(intr_src_id, 1);
    }
    // SAFETY: setting sie.SEIE only enables taking S-mode external
    // interrupts; handlers above are registered before this point.
    unsafe {
        sie::set_sext();
    }
    kprintln!("[KERN] board::qemu::device_init() end");
}
/// Claim the pending external interrupt from the PLIC, dispatch it to the
/// owning driver, and signal completion so further interrupts can arrive.
pub fn irq_handler() {
    // SAFETY-NOTE(review): assumes VIRT_PLIC is the mapped PLIC base,
    // same as in device_init().
    let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
    let source = plic.claim(0, IntrTargetPriority::Supervisor);
    match source {
        1 => BLOCK_DEVICE.handle_irq(),
        10 => UART.handle_irq(),
        _ => panic!("unsupported IRQ {}", source),
    }
    plic.complete(0, IntrTargetPriority::Supervisor, source);
}

@ -1,10 +1,10 @@
pub const CLOCK_FREQ: usize = 12500000;
pub const MMIO: &[(usize, usize)] = &[
(0x1000_0000, 0x1000), // VIRT_UART0 in virt machine
(0x1000_1000, 0x1000), // VIRT_VIRTIO in virt machine
(0x0C00_0000, 0x40_0000), // VIRT_PLIC in virt machine
(0x0010_0000, 0x00_2000), // VIRT_TEST/RTC in virt machine
(0x1000_0000, 0x1000), // VIRT_UART0 in virt machine
(0x1000_1000, 0x1000), // VIRT_VIRTIO in virt machine
(0x0C00_0000, 0x40_0000), // VIRT_PLIC in virt machine
(0x0010_0000, 0x00_2000), // VIRT_TEST/RTC in virt machine
];
pub type BlockDeviceImpl = crate::drivers::block::VirtIOBlock;
@ -19,6 +19,8 @@ use crate::drivers::plic::{IntrTargetPriority, PLIC};
pub fn device_init() {
use riscv::register::sie;
kprintln!("[KERN] board::qemu::device_init() begin");
let mut plic = unsafe { PLIC::new(VIRT_PLIC) };
let hart_id: usize = 0;
let supervisor = IntrTargetPriority::Supervisor;
@ -32,6 +34,7 @@ pub fn device_init() {
unsafe {
sie::set_sext();
}
kprintln!("[KERN] board::qemu::device_init() end");
}
pub fn irq_handler() {
@ -51,8 +54,8 @@ use core::arch::asm;
const EXIT_SUCCESS: u32 = 0x5555; // Equals `exit(0)`. qemu successful exit
const EXIT_FAILURE_FLAG: u32 = 0x3333;
const EXIT_FAILURE: u32 = exit_code_encode(1); // Equals `exit(1)`. qemu failed exit
const EXIT_RESET: u32 = 0x7777; // qemu reset
const EXIT_FAILURE: u32 = exit_code_encode(1); // Equals `exit(1)`. qemu failed exit
const EXIT_RESET: u32 = 0x7777; // qemu reset
pub trait QEMUExit {
/// Exit with specified return code.
@ -69,7 +72,6 @@ pub trait QEMUExit {
fn exit_failure(&self) -> !;
}
/// RISCV64 configuration
pub struct RISCV64 {
/// Address of the sifive_test mapped device.
@ -122,6 +124,6 @@ impl QEMUExit for RISCV64 {
}
}
const VIRT_TEST: u64 =0x100000;
const VIRT_TEST: u64 = 0x100000;
pub const QEMU_EXIT_HANDLE: RISCV64 = RISCV64::new(VIRT_TEST);
pub const QEMU_EXIT_HANDLE: RISCV64 = RISCV64::new(VIRT_TEST);

@ -29,3 +29,33 @@ macro_rules! println {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
}
}
use crate::sbi::console_putchar;
/// Zero-sized sink that writes kernel text straight to the SBI console.
struct Kstdout;

impl Write for Kstdout {
    /// Emit `s` one character at a time through `console_putchar`.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        s.chars().for_each(|ch| console_putchar(ch as usize));
        Ok(())
    }
}
/// Kernel-side print: format `args` and push them to the SBI console.
/// Panics only if formatting itself fails, which `Kstdout` never does.
pub fn kprint(args: fmt::Arguments) {
    Kstdout.write_fmt(args).unwrap();
}
/// Kernel-side `print!` analogue: formats via `console::kprint`,
/// writing directly to the SBI console.
#[macro_export]
macro_rules! kprint {
    ($fmt: literal $(, $($arg: tt)+)?) => {
        $crate::console::kprint(format_args!($fmt $(, $($arg)+)?));
    }
}

/// Kernel-side `println!` analogue: same as `kprint!` plus a trailing
/// newline appended at compile time via `concat!`.
#[macro_export]
macro_rules! kprintln {
    ($fmt: literal $(, $($arg: tt)+)?) => {
        $crate::console::kprint(format_args!(concat!($fmt, "\n") $(, $($arg)+)?));
    }
}

@ -1,16 +1,19 @@
mod sdcard;
// mod sdcard;
mod virtio_blk;
pub use sdcard::SDCardWrapper;
// pub use sdcard::SDCardWrapper;
pub use virtio_blk::VirtIOBlock;
use crate::board::BlockDeviceImpl;
use crate::fs::easy_fs::BlockDevice;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
use lazy_static::*;
lazy_static! {
pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = Arc::new(BlockDeviceImpl::new());
pub static ref BLOCK_DEVICE: Arc<dyn BlockDevice> = {
kprintln!("[KERN] drivers::block::lazy_static!BLOCK_DEVICE begin");
Arc::new(BlockDeviceImpl::new())
};
}
#[allow(unused)]

@ -20,8 +20,10 @@ pub struct VirtIOBlock {
}
lazy_static! {
static ref QUEUE_FRAMES: UPIntrFreeCell<Vec<FrameTracker>> =
unsafe { UPIntrFreeCell::new(Vec::new()) };
static ref QUEUE_FRAMES: UPIntrFreeCell<Vec<FrameTracker>> = {
kprintln!("[KERN] drivers::block::virtio_blk::lazy_static!QUEUE_FRAMES begin");
unsafe { UPIntrFreeCell::new(Vec::new()) }
};
}
impl BlockDevice for VirtIOBlock {

@ -13,5 +13,8 @@ pub trait CharDevice {
}
lazy_static! {
pub static ref UART: Arc<CharDeviceImpl> = Arc::new(CharDeviceImpl::new());
pub static ref UART: Arc<CharDeviceImpl> = {
kprintln!("[KERN] drivers::chardev::lazy_static!UART begin");
Arc::new(CharDeviceImpl::new())
};
}

@ -0,0 +1,72 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
type BitmapBlock = [u64; 64];
const BLOCK_BITS: usize = BLOCK_SZ * 8;
/// An on-disk allocation bitmap spanning `blocks` consecutive disk blocks
/// starting at `start_block_id`. Each bit tracks one allocatable object
/// (inode or data block).
pub struct Bitmap {
    // first disk block holding this bitmap's bits
    start_block_id: usize,
    // number of disk blocks the bitmap occupies
    blocks: usize,
}
/// Split a global bit index into (block_pos, bits64_pos, inner_pos):
/// which bitmap block, which u64 inside it, and which bit of that u64.
fn decomposition(bit: usize) -> (usize, usize, usize) {
    let block_pos = bit / BLOCK_BITS;
    let within_block = bit % BLOCK_BITS;
    (block_pos, within_block / 64, within_block % 64)
}
impl Bitmap {
    /// Describe a bitmap living at `start_block_id..start_block_id+blocks`.
    /// No disk access happens here.
    pub fn new(start_block_id: usize, blocks: usize) -> Self {
        kprintln!("[KERN EASYFS] bitmap::Bitmap::new() begin");
        Self {
            start_block_id,
            blocks,
        }
    }
    /// Allocate one clear bit, set it, and return its index within this
    /// bitmap, or `None` when every bit is already set.
    pub fn alloc(&self, block_device: &Arc<dyn BlockDevice>) -> Option<usize> {
        kprintln!("[KERN EASYFS] bitmap::Bitmap::alloc() begin");
        for block_id in 0..self.blocks {
            // start_block_id is already usize — no cast needed.
            let pos = get_block_cache(block_id + self.start_block_id, Arc::clone(block_device))
                .lock()
                .modify(0, |bitmap_block: &mut BitmapBlock| {
                    // Find the first u64 with a clear bit; trailing_ones()
                    // is exactly the index of its lowest clear bit.
                    if let Some((bits64_pos, inner_pos)) = bitmap_block
                        .iter()
                        .enumerate()
                        .find(|(_, bits64)| **bits64 != u64::MAX)
                        .map(|(bits64_pos, bits64)| (bits64_pos, bits64.trailing_ones() as usize))
                    {
                        // modify cache
                        bitmap_block[bits64_pos] |= 1u64 << inner_pos;
                        Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos)
                    } else {
                        None
                    }
                });
            if pos.is_some() {
                return pos;
            }
        }
        None
    }
    /// Free a previously allocated bit. Panics if the bit is not set
    /// (double free / corruption).
    pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
        kprintln!("[KERN EASYFS] bitmap::Bitmap::dealloc() begin");
        let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
        get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
            .lock()
            .modify(0, |bitmap_block: &mut BitmapBlock| {
                assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
                // Clear the bit with a mask; `-=` was equivalent under the
                // assert above but obscured the intent.
                bitmap_block[bits64_pos] &= !(1u64 << inner_pos);
            });
    }
    /// Total number of bits this bitmap manages.
    pub fn maximum(&self) -> usize {
        self.blocks * BLOCK_BITS
    }
}

@ -0,0 +1,146 @@
use super::{BlockDevice, BLOCK_SZ};
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
use spin::Mutex;
/// An in-memory copy of one disk block, written back on `sync`/`Drop`.
pub struct BlockCache {
    // the cached block contents
    cache: [u8; BLOCK_SZ],
    // which block this is a copy of
    block_id: usize,
    // device to read from / write back to
    block_device: Arc<dyn BlockDevice>,
    // dirty flag: set by get_mut/modify, cleared by sync
    modified: bool,
}
impl BlockCache {
    /// Load a new BlockCache from disk.
    pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
        //kprintln!("[KERN EASYFS] block_cache::BlockCache::new() begin");
        let mut cache = [0u8; BLOCK_SZ];
        block_device.read_block(block_id, &mut cache);
        Self {
            cache,
            block_id,
            block_device,
            modified: false,
        }
    }
    /// Address of byte `offset` inside the cached buffer.
    fn addr_of_offset(&self, offset: usize) -> usize {
        &self.cache[offset] as *const _ as usize
    }
    /// Reinterpret the bytes at `offset` as a `&T`.
    /// Panics if `[offset, offset+size_of::<T>())` leaves the block.
    pub fn get_ref<T>(&self, offset: usize) -> &T
    where
        T: Sized,
    {
        let type_size = core::mem::size_of::<T>();
        assert!(offset + type_size <= BLOCK_SZ);
        let addr = self.addr_of_offset(offset);
        // SAFETY: the assert keeps the span in-bounds. NOTE(review): this
        // also assumes `offset` is suitably aligned for T and that T is
        // valid for any byte pattern — callers must guarantee both.
        unsafe { &*(addr as *const T) }
    }
    /// Mutable variant of `get_ref`; marks the block dirty.
    pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
    where
        T: Sized,
    {
        let type_size = core::mem::size_of::<T>();
        assert!(offset + type_size <= BLOCK_SZ);
        // Set the dirty flag eagerly: the caller receives &mut T and may
        // write through it at any time.
        self.modified = true;
        let addr = self.addr_of_offset(offset);
        // SAFETY: same bounds/alignment/validity reasoning as get_ref.
        unsafe { &mut *(addr as *mut T) }
    }
    /// Run `f` on an immutable view of the value at `offset`.
    pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
        f(self.get_ref(offset))
    }
    /// Run `f` on a mutable view of the value at `offset` (marks dirty).
    pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
        f(self.get_mut(offset))
    }
    /// Write the buffer back to disk if (and only if) it was modified.
    pub fn sync(&mut self) {
        if self.modified {
            self.modified = false;
            self.block_device.write_block(self.block_id, &self.cache);
        }
    }
}
impl Drop for BlockCache {
    /// Flush any pending modification when the cache entry is evicted.
    fn drop(&mut self) {
        self.sync()
    }
}
const BLOCK_CACHE_SIZE: usize = 16;
/// FIFO-ordered cache of up to BLOCK_CACHE_SIZE resident blocks,
/// keyed by block id.
pub struct BlockCacheManager {
    // (block_id, shared cache entry), oldest at the front
    queue: VecDeque<(usize, Arc<Mutex<BlockCache>>)>,
}
impl BlockCacheManager {
    /// Empty cache: no blocks resident yet.
    pub fn new() -> Self {
        kprintln!("[KERN EASYFS] block_cache::BlockCacheManager::new() begin");
        Self {
            queue: VecDeque::new(),
        }
    }
    /// Return the cache entry for `block_id`, loading it from disk on a
    /// miss. On a miss with a full cache, evicts the oldest entry whose
    /// only owner is the cache itself; panics if every entry is in use.
    pub fn get_block_cache(
        &mut self,
        block_id: usize,
        block_device: Arc<dyn BlockDevice>,
    ) -> Arc<Mutex<BlockCache>> {
        if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
            Arc::clone(&pair.1)
        } else {
            // substitute
            if self.queue.len() == BLOCK_CACHE_SIZE {
                // from front to tail: first entry not borrowed elsewhere
                // (strong_count == 1 means only the queue holds it).
                if let Some(idx) = self
                    .queue
                    .iter()
                    .position(|pair| Arc::strong_count(&pair.1) == 1)
                {
                    // remove(idx) is the idiomatic single-element removal
                    // (the old drain(idx..=idx) did the same, clumsily).
                    self.queue.remove(idx);
                } else {
                    panic!("Run out of BlockCache!");
                }
            }
            // load block into mem and push back
            let block_cache = Arc::new(Mutex::new(BlockCache::new(
                block_id,
                Arc::clone(&block_device),
            )));
            self.queue.push_back((block_id, Arc::clone(&block_cache)));
            block_cache
        }
    }
}
// Global, lazily-initialized block cache shared by the whole easy-fs layer.
lazy_static! {
    pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = {
        kprintln!("[KERN EASYFS] block_cache::lazy_static!BLOCK_CACHE_MANAGER begin");
        Mutex::new(BlockCacheManager::new())
    };
}
/// Convenience wrapper: fetch `block_id` through the global cache manager.
pub fn get_block_cache(
    block_id: usize,
    block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
    let mut manager = BLOCK_CACHE_MANAGER.lock();
    manager.get_block_cache(block_id, block_device)
}
/// Flush every resident cache entry back to its block device.
pub fn block_cache_sync_all() {
    kprintln!("[KERN EASYFS] block_cache::block_cache_sync_all() begin");
    let manager = BLOCK_CACHE_MANAGER.lock();
    manager
        .queue
        .iter()
        .for_each(|(_, cache)| cache.lock().sync());
}

@ -0,0 +1,7 @@
use core::any::Any;
/// Abstraction over a block device with fixed-size blocks; implementors
/// must be shareable across threads (`Send + Sync`).
pub trait BlockDevice: Send + Sync + Any {
    /// Read block `block_id` into `buf`.
    fn read_block(&self, block_id: usize, buf: &mut [u8]);
    /// Write `buf` out as block `block_id`.
    fn write_block(&self, block_id: usize, buf: &[u8]);
    /// Service an interrupt raised by this device.
    fn handle_irq(&self);
}

@ -0,0 +1,158 @@
use super::BLOCK_SZ;
use super::{
block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
SuperBlock,
};
use alloc::sync::Arc;
use spin::Mutex;
/// In-memory handle to a mounted easy-fs instance. On-disk layout (in
/// blocks): [superblock | inode bitmap | inode area | data bitmap | data area].
pub struct EasyFileSystem {
    pub block_device: Arc<dyn BlockDevice>,
    // tracks which inodes are allocated
    pub inode_bitmap: Bitmap,
    // tracks which data blocks are allocated
    pub data_bitmap: Bitmap,
    // first block of the inode area
    inode_area_start_block: u32,
    // first block of the data area
    data_area_start_block: u32,
}
type DataBlock = [u8; BLOCK_SZ];
impl EasyFileSystem {
    /// Format `block_device` as a fresh easy-fs: size the five areas,
    /// zero every block, write the superblock, and create the root
    /// directory inode (inode 0).
    pub fn create(
        block_device: Arc<dyn BlockDevice>,
        total_blocks: u32,
        inode_bitmap_blocks: u32,
    ) -> Arc<Mutex<Self>> {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::create() begin");
        // calculate block size of areas & create bitmaps
        let inode_bitmap = Bitmap::new(1, inode_bitmap_blocks as usize);
        let inode_num = inode_bitmap.maximum();
        let inode_area_blocks =
            ((inode_num * core::mem::size_of::<DiskInode>() + BLOCK_SZ - 1) / BLOCK_SZ) as u32;
        let inode_total_blocks = inode_bitmap_blocks + inode_area_blocks;
        let data_total_blocks = total_blocks - 1 - inode_total_blocks;
        // One data-bitmap block covers 4096 data blocks, so split the
        // remainder 1:4096 (rounding the bitmap share up).
        let data_bitmap_blocks = (data_total_blocks + 4096) / 4097;
        let data_area_blocks = data_total_blocks - data_bitmap_blocks;
        let data_bitmap = Bitmap::new(
            (1 + inode_bitmap_blocks + inode_area_blocks) as usize,
            data_bitmap_blocks as usize,
        );
        let mut efs = Self {
            block_device: Arc::clone(&block_device),
            inode_bitmap,
            data_bitmap,
            inode_area_start_block: 1 + inode_bitmap_blocks,
            data_area_start_block: 1 + inode_total_blocks + data_bitmap_blocks,
        };
        // clear all blocks
        for i in 0..total_blocks {
            get_block_cache(i as usize, Arc::clone(&block_device))
                .lock()
                .modify(0, |data_block: &mut DataBlock| {
                    for byte in data_block.iter_mut() {
                        *byte = 0;
                    }
                });
        }
        // initialize SuperBlock
        get_block_cache(0, Arc::clone(&block_device)).lock().modify(
            0,
            |super_block: &mut SuperBlock| {
                super_block.initialize(
                    total_blocks,
                    inode_bitmap_blocks,
                    inode_area_blocks,
                    data_bitmap_blocks,
                    data_area_blocks,
                );
            },
        );
        // write back immediately
        // create a inode for root node "/"
        // The very first allocation on an empty bitmap must yield inode 0.
        assert_eq!(efs.alloc_inode(), 0);
        let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
        get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
            .lock()
            .modify(root_inode_offset, |disk_inode: &mut DiskInode| {
                disk_inode.initialize(DiskInodeType::Directory);
            });
        block_cache_sync_all();
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::create() end");
        Arc::new(Mutex::new(efs))
    }
    /// Mount an existing easy-fs by reading its superblock and
    /// reconstructing the area boundaries. Panics if the magic is wrong.
    pub fn open(block_device: Arc<dyn BlockDevice>) -> Arc<Mutex<Self>> {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::open() begin");
        // read SuperBlock
        get_block_cache(0, Arc::clone(&block_device))
            .lock()
            .read(0, |super_block: &SuperBlock| {
                assert!(super_block.is_valid(), "Error loading EFS!");
                let inode_total_blocks =
                    super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
                let efs = Self {
                    block_device,
                    inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
                    data_bitmap: Bitmap::new(
                        (1 + inode_total_blocks) as usize,
                        super_block.data_bitmap_blocks as usize,
                    ),
                    inode_area_start_block: 1 + super_block.inode_bitmap_blocks,
                    data_area_start_block: 1 + inode_total_blocks + super_block.data_bitmap_blocks,
                };
                kprintln!("[KERN EASYFS] efs::EasyFileSystem::open() end");
                Arc::new(Mutex::new(efs))
            })
    }
    /// Build an `Inode` handle for the root directory (inode 0).
    /// The efs lock is taken only in short scopes so `Inode::new` itself
    /// runs without it held.
    pub fn root_inode(efs: &Arc<Mutex<Self>>) -> Inode {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::root_inode() begin");
        let block_device = Arc::clone(&efs.lock().block_device);
        // acquire efs lock temporarily
        let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::root_inode() end");
        // release efs lock
        Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
    }
    /// Map an inode id to (disk block id, byte offset within that block).
    pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::get_disk_inode_pos() begin");
        let inode_size = core::mem::size_of::<DiskInode>();
        let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
        let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::get_disk_inode_pos() end");
        (
            block_id,
            (inode_id % inodes_per_block) as usize * inode_size,
        )
    }
    /// Translate a data-area-relative block index to an absolute block id.
    pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
        self.data_area_start_block + data_block_id
    }
    /// Allocate a fresh inode id. Panics if the inode bitmap is full.
    pub fn alloc_inode(&mut self) -> u32 {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::alloc_inode() begin");
        self.inode_bitmap.alloc(&self.block_device).unwrap() as u32
    }
    /// Return a block ID not ID in the data area.
    /// (i.e. the absolute block id, offset by data_area_start_block.)
    /// Panics if the data bitmap is full.
    pub fn alloc_data(&mut self) -> u32 {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::alloc_data() begin");
        self.data_bitmap.alloc(&self.block_device).unwrap() as u32 + self.data_area_start_block
    }
    /// Zero the given absolute data block and return it to the bitmap.
    pub fn dealloc_data(&mut self, block_id: u32) {
        kprintln!("[KERN EASYFS] efs::EasyFileSystem::dealloc_data() begin");
        get_block_cache(block_id as usize, Arc::clone(&self.block_device))
            .lock()
            .modify(0, |data_block: &mut DataBlock| {
                data_block.iter_mut().for_each(|p| {
                    *p = 0;
                })
            });
        self.data_bitmap.dealloc(
            &self.block_device,
            (block_id - self.data_area_start_block) as usize,
        )
    }
}

@ -0,0 +1,418 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
// Magic number identifying a valid easy-fs superblock.
const EFS_MAGIC: u32 = 0x3b800001;
// Number of direct block pointers embedded in a DiskInode.
const INODE_DIRECT_COUNT: usize = 28;
// Maximum file-name length; one extra byte is reserved for the NUL.
const NAME_LENGTH_LIMIT: usize = 27;
// u32 block ids that fit in one indirect block.
const INODE_INDIRECT1_COUNT: usize = BLOCK_SZ / 4;
const INODE_INDIRECT2_COUNT: usize = INODE_INDIRECT1_COUNT * INODE_INDIRECT1_COUNT;
// Highest data-block index reachable through direct pointers.
const DIRECT_BOUND: usize = INODE_DIRECT_COUNT;
// Highest data-block index reachable through the level-1 indirect block.
const INDIRECT1_BOUND: usize = DIRECT_BOUND + INODE_INDIRECT1_COUNT;
#[allow(unused)]
const INDIRECT2_BOUND: usize = INDIRECT1_BOUND + INODE_INDIRECT2_COUNT;
/// On-disk superblock stored in block 0; records how many blocks each
/// of the filesystem's areas occupies.
#[repr(C)]
pub struct SuperBlock {
    // EFS_MAGIC when the filesystem is valid
    magic: u32,
    pub total_blocks: u32,
    pub inode_bitmap_blocks: u32,
    pub inode_area_blocks: u32,
    pub data_bitmap_blocks: u32,
    pub data_area_blocks: u32,
}
impl Debug for SuperBlock {
    /// Debug output shows the area sizes; the magic field is omitted.
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        let mut builder = f.debug_struct("SuperBlock");
        builder.field("total_blocks", &self.total_blocks);
        builder.field("inode_bitmap_blocks", &self.inode_bitmap_blocks);
        builder.field("inode_area_blocks", &self.inode_area_blocks);
        builder.field("data_bitmap_blocks", &self.data_bitmap_blocks);
        builder.field("data_area_blocks", &self.data_area_blocks);
        builder.finish()
    }
}
impl SuperBlock {
    /// Stamp the magic number and record every area size.
    pub fn initialize(
        &mut self,
        total_blocks: u32,
        inode_bitmap_blocks: u32,
        inode_area_blocks: u32,
        data_bitmap_blocks: u32,
        data_area_blocks: u32,
    ) {
        kprintln!("[KERN EASYFS] layout::SuperBlock::initialize() begin");
        self.magic = EFS_MAGIC;
        self.total_blocks = total_blocks;
        self.inode_bitmap_blocks = inode_bitmap_blocks;
        self.inode_area_blocks = inode_area_blocks;
        self.data_bitmap_blocks = data_bitmap_blocks;
        self.data_area_blocks = data_area_blocks;
    }
    /// True when the magic number matches a formatted easy-fs.
    pub fn is_valid(&self) -> bool {
        self.magic == EFS_MAGIC
    }
}
/// Kind of object a DiskInode describes: regular file or directory.
#[derive(PartialEq)]
pub enum DiskInodeType {
    File,
    Directory,
}
// One disk block viewed as an array of u32 block ids.
type IndirectBlock = [u32; BLOCK_SZ / 4];
// One disk block viewed as raw bytes.
type DataBlock = [u8; BLOCK_SZ];
/// On-disk inode: file size plus a 28-direct / 1-indirect / 1-double-
/// indirect block map (classic Unix-style layout).
#[repr(C)]
pub struct DiskInode {
    pub size: u32,
    pub direct: [u32; INODE_DIRECT_COUNT],
    // block id of the level-1 indirect block (0 = not allocated)
    pub indirect1: u32,
    // block id of the level-2 indirect block (0 = not allocated)
    pub indirect2: u32,
    type_: DiskInodeType,
}
impl DiskInode {
    /// Reset this inode to an empty object of the given type.
    ///
    /// indirect1 and indirect2 block are allocated only when they are needed.
    pub fn initialize(&mut self, type_: DiskInodeType) {
        kprintln!("[KERN EASYFS] layout::DiskInode::initialize() begin");
        self.size = 0;
        self.direct.iter_mut().for_each(|v| *v = 0);
        self.indirect1 = 0;
        self.indirect2 = 0;
        self.type_ = type_;
    }
    /// True if this inode describes a directory.
    pub fn is_dir(&self) -> bool {
        self.type_ == DiskInodeType::Directory
    }
    /// True if this inode describes a regular file.
    #[allow(unused)]
    pub fn is_file(&self) -> bool {
        self.type_ == DiskInodeType::File
    }
    /// Return block number correspond to size.
    pub fn data_blocks(&self) -> u32 {
        Self::_data_blocks(self.size)
    }
    // Blocks needed to hold `size` bytes (ceiling division).
    fn _data_blocks(size: u32) -> u32 {
        (size + BLOCK_SZ as u32 - 1) / BLOCK_SZ as u32
    }
    /// Return number of blocks needed include indirect1/2.
    pub fn total_blocks(size: u32) -> u32 {
        let data_blocks = Self::_data_blocks(size) as usize;
        let mut total = data_blocks as usize;
        // indirect1
        if data_blocks > INODE_DIRECT_COUNT {
            total += 1;
        }
        // indirect2
        if data_blocks > INDIRECT1_BOUND {
            total += 1;
            // sub indirect1
            total +=
                (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
        }
        total as u32
    }
    /// Extra blocks (data + indirect) required to grow to `new_size`.
    /// Panics if `new_size` would shrink the file.
    pub fn blocks_num_needed(&self, new_size: u32) -> u32 {
        assert!(new_size >= self.size);
        Self::total_blocks(new_size) - Self::total_blocks(self.size)
    }
    /// Resolve the `inner_id`-th data block of this file to an absolute
    /// block id, walking the direct / indirect1 / indirect2 maps.
    pub fn get_block_id(&self, inner_id: u32, block_device: &Arc<dyn BlockDevice>) -> u32 {
        //kprintln!("[KERN EASYFS] layout::DiskInode::get_block_id() begin");
        let inner_id = inner_id as usize;
        if inner_id < INODE_DIRECT_COUNT {
            self.direct[inner_id]
        } else if inner_id < INDIRECT1_BOUND {
            get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
                .lock()
                .read(0, |indirect_block: &IndirectBlock| {
                    indirect_block[inner_id - INODE_DIRECT_COUNT]
                })
        } else {
            let last = inner_id - INDIRECT1_BOUND;
            let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
                .lock()
                .read(0, |indirect2: &IndirectBlock| {
                    indirect2[last / INODE_INDIRECT1_COUNT]
                });
            get_block_cache(indirect1 as usize, Arc::clone(block_device))
                .lock()
                .read(0, |indirect1: &IndirectBlock| {
                    indirect1[last % INODE_INDIRECT1_COUNT]
                })
        }
    }
    /// Grow the file to `new_size`, wiring the pre-allocated blocks in
    /// `new_blocks` into the direct/indirect maps in order.
    /// Panics (via unwrap) if `new_blocks` holds fewer blocks than
    /// `blocks_num_needed(new_size)` requires.
    pub fn increase_size(
        &mut self,
        new_size: u32,
        new_blocks: Vec<u32>,
        block_device: &Arc<dyn BlockDevice>,
    ) {
        kprintln!("[KERN EASYFS] layout::DiskInode::increase_size() begin");
        let mut current_blocks = self.data_blocks();
        self.size = new_size;
        let mut total_blocks = self.data_blocks();
        let mut new_blocks = new_blocks.into_iter();
        // fill direct
        while current_blocks < total_blocks.min(INODE_DIRECT_COUNT as u32) {
            self.direct[current_blocks as usize] = new_blocks.next().unwrap();
            current_blocks += 1;
        }
        // alloc indirect1
        if total_blocks > INODE_DIRECT_COUNT as u32 {
            if current_blocks == INODE_DIRECT_COUNT as u32 {
                self.indirect1 = new_blocks.next().unwrap();
            }
            // From here on, counters are relative to the indirect1 range.
            current_blocks -= INODE_DIRECT_COUNT as u32;
            total_blocks -= INODE_DIRECT_COUNT as u32;
        } else {
            return;
        }
        // fill indirect1
        get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
            .lock()
            .modify(0, |indirect1: &mut IndirectBlock| {
                while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
                    indirect1[current_blocks as usize] = new_blocks.next().unwrap();
                    current_blocks += 1;
                }
            });
        // alloc indirect2
        if total_blocks > INODE_INDIRECT1_COUNT as u32 {
            if current_blocks == INODE_INDIRECT1_COUNT as u32 {
                self.indirect2 = new_blocks.next().unwrap();
            }
            // Counters become relative to the indirect2 range.
            current_blocks -= INODE_INDIRECT1_COUNT as u32;
            total_blocks -= INODE_INDIRECT1_COUNT as u32;
        } else {
            return;
        }
        // fill indirect2 from (a0, b0) -> (a1, b1)
        // a* index the level-1 blocks inside indirect2, b* the entries
        // inside the current level-1 block.
        let mut a0 = current_blocks as usize / INODE_INDIRECT1_COUNT;
        let mut b0 = current_blocks as usize % INODE_INDIRECT1_COUNT;
        let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
        let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
        // alloc low-level indirect1
        get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
            .lock()
            .modify(0, |indirect2: &mut IndirectBlock| {
                while (a0 < a1) || (a0 == a1 && b0 < b1) {
                    if b0 == 0 {
                        // entering a fresh level-1 block: allocate it first
                        indirect2[a0] = new_blocks.next().unwrap();
                    }
                    // fill current
                    get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
                        .lock()
                        .modify(0, |indirect1: &mut IndirectBlock| {
                            indirect1[b0] = new_blocks.next().unwrap();
                        });
                    // move to next
                    b0 += 1;
                    if b0 == INODE_INDIRECT1_COUNT {
                        b0 = 0;
                        a0 += 1;
                    }
                }
            });
    }
    /// Clear size to zero and return blocks that should be deallocated.
    ///
    /// We will clear the block contents to zero later.
    pub fn clear_size(&mut self, block_device: &Arc<dyn BlockDevice>) -> Vec<u32> {
        kprintln!("[KERN EASYFS] layout::DiskInode::clear_size() begin");
        let mut v: Vec<u32> = Vec::new();
        let mut data_blocks = self.data_blocks() as usize;
        self.size = 0;
        let mut current_blocks = 0usize;
        // direct
        while current_blocks < data_blocks.min(INODE_DIRECT_COUNT) {
            v.push(self.direct[current_blocks]);
            self.direct[current_blocks] = 0;
            current_blocks += 1;
        }
        // indirect1 block
        if data_blocks > INODE_DIRECT_COUNT {
            v.push(self.indirect1);
            data_blocks -= INODE_DIRECT_COUNT;
            current_blocks = 0;
        } else {
            return v;
        }
        // indirect1
        get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
            .lock()
            .modify(0, |indirect1: &mut IndirectBlock| {
                while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
                    v.push(indirect1[current_blocks]);
                    //indirect1[current_blocks] = 0;
                    current_blocks += 1;
                }
            });
        self.indirect1 = 0;
        // indirect2 block
        if data_blocks > INODE_INDIRECT1_COUNT {
            v.push(self.indirect2);
            data_blocks -= INODE_INDIRECT1_COUNT;
        } else {
            return v;
        }
        // indirect2
        assert!(data_blocks <= INODE_INDIRECT2_COUNT);
        let a1 = data_blocks / INODE_INDIRECT1_COUNT;
        let b1 = data_blocks % INODE_INDIRECT1_COUNT;
        get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
            .lock()
            .modify(0, |indirect2: &mut IndirectBlock| {
                // full indirect1 blocks
                for entry in indirect2.iter_mut().take(a1) {
                    v.push(*entry);
                    get_block_cache(*entry as usize, Arc::clone(block_device))
                        .lock()
                        .modify(0, |indirect1: &mut IndirectBlock| {
                            for entry in indirect1.iter() {
                                v.push(*entry);
                            }
                        });
                }
                // last indirect1 block
                if b1 > 0 {
                    v.push(indirect2[a1]);
                    get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
                        .lock()
                        .modify(0, |indirect1: &mut IndirectBlock| {
                            for entry in indirect1.iter().take(b1) {
                                v.push(*entry);
                            }
                        });
                    //indirect2[a1] = 0;
                }
            });
        self.indirect2 = 0;
        kprintln!("[KERN EASYFS] layout::DiskInode::clear_size() end");
        v
    }
    /// Copy up to `buf.len()` bytes starting at byte `offset` into `buf`,
    /// clamped to the file size; returns the number of bytes read.
    pub fn read_at(
        &self,
        offset: usize,
        buf: &mut [u8],
        block_device: &Arc<dyn BlockDevice>,
    ) -> usize {
        //kprintln!("[KERN EASYFS] layout::DiskInode::read_at() begin");
        let mut start = offset;
        let end = (offset + buf.len()).min(self.size as usize);
        if start >= end {
            return 0;
        }
        let mut start_block = start / BLOCK_SZ;
        let mut read_size = 0usize;
        loop {
            // calculate end of current block
            let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
            end_current_block = end_current_block.min(end);
            // read and update read size
            let block_read_size = end_current_block - start;
            let dst = &mut buf[read_size..read_size + block_read_size];
            get_block_cache(
                self.get_block_id(start_block as u32, block_device) as usize,
                Arc::clone(block_device),
            )
            .lock()
            .read(0, |data_block: &DataBlock| {
                let src = &data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_read_size];
                dst.copy_from_slice(src);
            });
            read_size += block_read_size;
            // move to next block
            if end_current_block == end {
                break;
            }
            start_block += 1;
            start = end_current_block;
        }
        read_size
    }
    /// File size must be adjusted before.
    ///
    /// Copy `buf` into the file starting at byte `offset`; the write is
    /// clamped to the current size (grow via increase_size first).
    pub fn write_at(
        &mut self,
        offset: usize,
        buf: &[u8],
        block_device: &Arc<dyn BlockDevice>,
    ) -> usize {
        kprintln!("[KERN EASYFS] layout::DiskInode::write_at() begin");
        let mut start = offset;
        let end = (offset + buf.len()).min(self.size as usize);
        assert!(start <= end);
        let mut start_block = start / BLOCK_SZ;
        let mut write_size = 0usize;
        loop {
            // calculate end of current block
            let mut end_current_block = (start / BLOCK_SZ + 1) * BLOCK_SZ;
            end_current_block = end_current_block.min(end);
            // write and update write size
            let block_write_size = end_current_block - start;
            get_block_cache(
                self.get_block_id(start_block as u32, block_device) as usize,
                Arc::clone(block_device),
            )
            .lock()
            .modify(0, |data_block: &mut DataBlock| {
                let src = &buf[write_size..write_size + block_write_size];
                let dst = &mut data_block[start % BLOCK_SZ..start % BLOCK_SZ + block_write_size];
                dst.copy_from_slice(src);
            });
            write_size += block_write_size;
            // move to next block
            if end_current_block == end {
                break;
            }
            start_block += 1;
            start = end_current_block;
        }
        write_size
    }
}
/// On-disk directory entry: NUL-padded name plus inode number.
#[repr(C)]
pub struct DirEntry {
    name: [u8; NAME_LENGTH_LIMIT + 1],
    inode_number: u32,
}
// Size of one serialized DirEntry on disk (28-byte name + 4-byte inode).
pub const DIRENT_SZ: usize = 32;
impl DirEntry {
    /// All-zero entry: empty name, inode number 0.
    pub fn empty() -> Self {
        Self {
            name: [0u8; NAME_LENGTH_LIMIT + 1],
            inode_number: 0,
        }
    }
    /// Build an entry for `name` -> `inode_number`.
    /// Panics with a clear message if the name exceeds the limit
    /// (previously this panicked inside copy_from_slice with an opaque
    /// slice-length error).
    pub fn new(name: &str, inode_number: u32) -> Self {
        kprintln!("[KERN EASYFS] layout::DirEntry::new() begin");
        assert!(
            name.len() <= NAME_LENGTH_LIMIT,
            "directory entry name too long"
        );
        let mut bytes = [0u8; NAME_LENGTH_LIMIT + 1];
        bytes[..name.len()].copy_from_slice(name.as_bytes());
        Self {
            name: bytes,
            inode_number,
        }
    }
    /// View this entry as the raw DIRENT_SZ bytes stored on disk.
    pub fn as_bytes(&self) -> &[u8] {
        // SAFETY: DirEntry is #[repr(C)] and exactly DIRENT_SZ bytes.
        unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
    }
    /// Mutable raw-byte view, used when deserializing from disk.
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        // SAFETY: same layout argument as as_bytes().
        unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
    }
    /// The entry's name, up to the first NUL byte.
    /// Bounded scan: the old `(0usize..).find(...)` indexed out of bounds
    /// when the name filled all NAME_LENGTH_LIMIT + 1 bytes with no NUL.
    pub fn name(&self) -> &str {
        let len = self
            .name
            .iter()
            .position(|&b| b == 0)
            .unwrap_or(self.name.len());
        core::str::from_utf8(&self.name[..len]).unwrap()
    }
    /// The inode number this entry points at.
    pub fn inode_number(&self) -> u32 {
        self.inode_number
    }
}

@ -0,0 +1,18 @@
// easy-fs embedded in the kernel: on-disk layout, block cache, and
// inode-level VFS.
// #![no_std]
// extern crate alloc;
mod bitmap;
mod block_cache;
mod block_dev;
mod efs;
mod layout;
mod vfs;
// Size of one disk block in bytes.
pub const BLOCK_SZ: usize = 512;
pub use bitmap::Bitmap;
pub use block_cache::{block_cache_sync_all, get_block_cache};
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
pub use layout::*;
pub use vfs::Inode;

@ -0,0 +1,198 @@
use super::{
block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
EasyFileSystem, DIRENT_SZ,
};
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
/// In-memory handle to one on-disk inode, addressed by the block and
/// byte offset where its DiskInode lives.
pub struct Inode {
    block_id: usize,
    block_offset: usize,
    // the filesystem this inode belongs to
    fs: Arc<Mutex<EasyFileSystem>>,
    block_device: Arc<dyn BlockDevice>,
}
impl Inode {
/// Build a handle for the DiskInode at (`block_id`, `block_offset`).
///
/// We should not acquire efs lock here.
pub fn new(
    block_id: u32,
    block_offset: usize,
    fs: Arc<Mutex<EasyFileSystem>>,
    block_device: Arc<dyn BlockDevice>,
) -> Self {
    kprintln!("[KERN EASYFS] vfs::Inode::new() begin");
    let block_id = block_id as usize;
    Self {
        block_id,
        block_offset,
        fs,
        block_device,
    }
}
/// Run `f` on an immutable view of this inode's on-disk record.
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
    kprintln!("[KERN EASYFS] vfs::Inode::read_disk_inode() begin");
    let cache = get_block_cache(self.block_id, Arc::clone(&self.block_device));
    cache.lock().read(self.block_offset, f)
}
/// Run `f` on a mutable view of this inode's on-disk record
/// (marks the containing cache block dirty).
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
    kprintln!("[KERN EASYFS] vfs::Inode::modify_disk_inode() begin");
    let cache = get_block_cache(self.block_id, Arc::clone(&self.block_device));
    cache.lock().modify(self.block_offset, f)
}
/// Linear-scan this directory's entries for `name`; returns the
/// matching inode number, or None. Panics if this inode is not a
/// directory. Callers in this file take the efs lock before calling.
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
    kprintln!("[KERN EASYFS] vfs::Inode::find_inode_id() begin");
    // assert it is a directory
    assert!(disk_inode.is_dir());
    let file_count = (disk_inode.size as usize) / DIRENT_SZ;
    let mut dirent = DirEntry::empty();
    for i in 0..file_count {
        // A short read here would mean a truncated directory — treat as fatal.
        assert_eq!(
            disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
            DIRENT_SZ,
        );
        if dirent.name() == name {
            return Some(dirent.inode_number() as u32);
        }
    }
    None
}
/// Look up `name` in this directory and return a handle to its inode.
/// Holds the efs lock for the whole lookup.
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
    kprintln!("[KERN EASYFS] vfs::Inode::find() begin");
    let fs = self.fs.lock();
    self.read_disk_inode(|disk_inode| {
        self.find_inode_id(name, disk_inode).map(|inode_id| {
            let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
            Arc::new(Self::new(
                block_id,
                block_offset,
                self.fs.clone(),
                self.block_device.clone(),
            ))
        })
    })
}
/// Grow the on-disk inode to `new_size`, allocating the required data
/// blocks from the filesystem. No-op when `new_size` does not grow the
/// file. Caller must already hold the efs lock (passed as `fs`).
fn increase_size(
    &self,
    new_size: u32,
    disk_inode: &mut DiskInode,
    fs: &mut MutexGuard<EasyFileSystem>,
) {
    kprintln!("[KERN EASYFS] vfs::Inode::increase_size() begin");
    if new_size < disk_inode.size {
        return;
    }
    let blocks_needed = disk_inode.blocks_num_needed(new_size);
    let mut v: Vec<u32> = Vec::new();
    for _ in 0..blocks_needed {
        v.push(fs.alloc_data());
    }
    disk_inode.increase_size(new_size, v, &self.block_device);
}
/// Create file `name` in this directory and return its inode handle,
/// or None if an entry with that name already exists.
/// Holds the efs lock for the whole operation.
pub fn create(&self, name: &str) -> Option<Arc<Inode>> {
    kprintln!("[KERN EASYFS] vfs::Inode::create() begin");
    let mut fs = self.fs.lock();
    let op = |root_inode: &mut DiskInode| {
        // assert it is a directory
        assert!(root_inode.is_dir());
        // has the file been created?
        self.find_inode_id(name, root_inode)
    };
    if self.modify_disk_inode(op).is_some() {
        return None;
    }
    // create a new file
    // alloc a inode with an indirect block
    let new_inode_id = fs.alloc_inode();
    // initialize inode
    let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
    get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
        .lock()
        .modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
            new_inode.initialize(DiskInodeType::File);
        });
    self.modify_disk_inode(|root_inode| {
        // append file in the dirent
        let file_count = (root_inode.size as usize) / DIRENT_SZ;
        let new_size = (file_count + 1) * DIRENT_SZ;
        // increase size
        self.increase_size(new_size as u32, root_inode, &mut fs);
        // write dirent
        let dirent = DirEntry::new(name, new_inode_id);
        root_inode.write_at(
            file_count * DIRENT_SZ,
            dirent.as_bytes(),
            &self.block_device,
        );
    });
    let (block_id, block_offset) = fs.get_disk_inode_pos(new_inode_id);
    block_cache_sync_all();
    kprintln!("[KERN EASYFS] vfs::Inode::create() end");
    // return inode
    Some(Arc::new(Self::new(
        block_id,
        block_offset,
        self.fs.clone(),
        self.block_device.clone(),
    )))
    // release efs lock automatically by compiler
}
/// Return the names of all entries in this directory, in on-disk order.
pub fn ls(&self) -> Vec<String> {
    kprintln!("[KERN EASYFS] vfs::Inode::ls() begin");
    let _fs = self.fs.lock();
    self.read_disk_inode(|disk_inode| {
        let entry_total = (disk_inode.size as usize) / DIRENT_SZ;
        (0..entry_total)
            .map(|idx| {
                let mut dirent = DirEntry::empty();
                // Each slot must yield one complete directory entry.
                assert_eq!(
                    disk_inode.read_at(idx * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
                    DIRENT_SZ,
                );
                String::from(dirent.name())
            })
            .collect()
    })
}
/// Read up to `buf.len()` bytes starting at `offset`; returns the number
/// of bytes actually read.
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
    kprintln!("[KERN EASYFS] vfs::Inode::read_at() begin");
    let _fs = self.fs.lock();
    self.read_disk_inode(|disk_inode| {
        // Delegate to the on-disk inode, which walks the block mapping.
        disk_inode.read_at(offset, buf, &self.block_device)
    })
}
/// Write `buf` at `offset`, growing the file first so the whole write
/// fits; returns the number of bytes written. Dirty cache blocks are
/// flushed to the device before returning.
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {
    kprintln!("[KERN EASYFS] vfs::Inode::write_at() begin");
    let mut fs = self.fs.lock();
    let written = self.modify_disk_inode(|disk_inode| {
        let required = (offset + buf.len()) as u32;
        self.increase_size(required, disk_inode, &mut fs);
        disk_inode.write_at(offset, buf, &self.block_device)
    });
    block_cache_sync_all();
    written
}
/// Truncate the file to zero length, returning every data block it used
/// to the filesystem allocator, then flush dirty cache blocks.
pub fn clear(&self) {
    kprintln!("[KERN EASYFS] vfs::Inode::clear() begin");
    let mut fs = self.fs.lock();
    self.modify_disk_inode(|disk_inode| {
        let old_size = disk_inode.size;
        let freed = disk_inode.clear_size(&self.block_device);
        // Every block the inode referenced must come back for deallocation.
        assert!(freed.len() == DiskInode::total_blocks(old_size) as usize);
        for block in freed {
            fs.dealloc_data(block);
        }
    });
    block_cache_sync_all();
}
}

@ -1,3 +1,4 @@
use super::easy_fs::{EasyFileSystem, Inode};
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
@ -5,7 +6,6 @@ use crate::sync::UPIntrFreeCell;
use alloc::sync::Arc;
use alloc::vec::Vec;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
pub struct OSInode {
@ -21,6 +21,7 @@ pub struct OSInodeInner {
impl OSInode {
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
kprintln!("[KERN] fs::inode::OSInode::new() begin");
Self {
readable,
writable,
@ -28,6 +29,7 @@ impl OSInode {
}
}
pub fn read_all(&self) -> Vec<u8> {
kprintln!("[KERN] fs::inode::OSInode::read_all() begin");
let mut inner = self.inner.exclusive_access();
let mut buffer = [0u8; 512];
let mut v: Vec<u8> = Vec::new();
@ -45,17 +47,18 @@ impl OSInode {
lazy_static! {
pub static ref ROOT_INODE: Arc<Inode> = {
kprintln!("[KERN] fs::inode::lazy_static!ROOT_INODE begin");
let efs = EasyFileSystem::open(BLOCK_DEVICE.clone());
Arc::new(EasyFileSystem::root_inode(&efs))
};
}
/// Print the name of every file in the root directory, framed by banner
/// lines, so the boot log shows which apps are available.
pub fn list_apps() {
    println!("/**** APPS ****");
    kprintln!("[KERN] fs::inode::list_apps() begin");
    for app in ROOT_INODE.ls() {
        println!("{}", app);
    }
    // Fixed: the banner line was missing its trailing semicolon, which is a
    // compile error now that another statement follows it.
    println!("**************/");
    kprintln!("[KERN] fs::inode::list_apps() end");
}
bitflags! {
@ -83,6 +86,7 @@ impl OpenFlags {
}
pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
kprintln!("[KERN] fs::inode::open_file() begin");
let (readable, writable) = flags.read_write();
if flags.contains(OpenFlags::CREATE) {
if let Some(inode) = ROOT_INODE.find(name) {
@ -113,6 +117,7 @@ impl File for OSInode {
self.writable
}
fn read(&self, mut buf: UserBuffer) -> usize {
kprintln!("[KERN] fs::inode::OSInode<FIle>::read() begin");
let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize;
for slice in buf.buffers.iter_mut() {
@ -123,9 +128,11 @@ impl File for OSInode {
inner.offset += read_size;
total_read_size += read_size;
}
kprintln!("[KERN] fs::inode::OSInode<FIle>::read() end");
total_read_size
}
fn write(&self, buf: UserBuffer) -> usize {
kprintln!("[KERN] fs::inode::OSInode<FIle>::write() begin");
let mut inner = self.inner.exclusive_access();
let mut total_write_size = 0usize;
for slice in buf.buffers.iter() {
@ -134,6 +141,7 @@ impl File for OSInode {
inner.offset += write_size;
total_write_size += write_size;
}
kprintln!("[KERN] fs::inode::OSInode<FIle>::write() end");
total_write_size
}
}

@ -1,7 +1,7 @@
pub mod easy_fs;
mod inode;
mod pipe;
mod stdio;
use crate::mm::UserBuffer;
pub trait File: Send + Sync {
@ -11,6 +11,7 @@ pub trait File: Send + Sync {
fn write(&self, buf: UserBuffer) -> usize;
}
pub use easy_fs::*;
pub use inode::{list_apps, open_file, OSInode, OpenFlags};
pub use pipe::{make_pipe, Pipe};
pub use stdio::{Stdin, Stdout};

@ -8,15 +8,16 @@ extern crate alloc;
#[macro_use]
extern crate bitflags;
#[cfg(feature = "board_k210")]
#[path = "boards/k210.rs"]
mod board;
#[cfg(not(any(feature = "board_k210")))]
#[path = "boards/qemu.rs"]
mod board;
// #[cfg(feature = "board_k210")]
// #[path = "boards/k210.rs"]
// mod board;
// #[cfg(not(any(feature = "board_k210")))]
// #[path = "board/qemu.rs"]
// mod board::qemu;
#[macro_use]
mod console;
mod board;
mod config;
mod drivers;
mod fs;
@ -28,6 +29,7 @@ mod syscall;
mod task;
mod timer;
mod trap;
// use board::*;
core::arch::global_asm!(include_str!("entry.asm"));
@ -36,22 +38,27 @@ fn clear_bss() {
fn sbss();
fn ebss();
}
kprintln!("[KERN] clear_bss() begin");
unsafe {
core::slice::from_raw_parts_mut(sbss as usize as *mut u8, ebss as usize - sbss as usize)
.fill(0);
}
kprintln!("[KERN] clear_bss() end");
}
use lazy_static::*;
use sync::UPIntrFreeCell;
lazy_static! {
pub static ref DEV_NON_BLOCKING_ACCESS: UPIntrFreeCell<bool> =
unsafe { UPIntrFreeCell::new(false) };
pub static ref DEV_NON_BLOCKING_ACCESS: UPIntrFreeCell<bool> = {
kprintln!("[KERN] main::lazy_static!DEV_NON_BLOCKING_ACCESS begin");
unsafe { UPIntrFreeCell::new(false) }
};
}
#[no_mangle]
pub fn rust_main() -> ! {
kprintln!("[KERN] rust_main() begin");
clear_bss();
mm::init();
trap::init();
@ -62,5 +69,5 @@ pub fn rust_main() -> ! {
task::add_initproc();
*DEV_NON_BLOCKING_ACCESS.exclusive_access() = true;
task::run_tasks();
panic!("Unreachable in rust_main!");
panic!("[KERN] Unreachable in rust_main!");
}

@ -28,7 +28,9 @@ impl Debug for FrameTracker {
impl Drop for FrameTracker {
fn drop(&mut self) {
kprintln!("[KERN] mm::frame_allocator::Drop<FrameTracker>::drop begin");
frame_dealloc(self.ppn);
kprintln!("[KERN] mm::frame_allocator::Drop<FrameTracker>::drop end");
}
}
@ -48,7 +50,7 @@ impl StackFrameAllocator {
pub fn init(&mut self, l: PhysPageNum, r: PhysPageNum) {
self.current = l.0;
self.end = r.0;
println!("last {} Physical Frames.", self.end - self.current);
println!("[KERN] last {} Physical Frames.", self.end - self.current);
}
}
impl FrameAllocator for StackFrameAllocator {
@ -73,7 +75,7 @@ impl FrameAllocator for StackFrameAllocator {
let ppn = ppn.0;
// validity check
if ppn >= self.current || self.recycled.iter().any(|&v| v == ppn) {
panic!("Frame ppn={:#x} has not been allocated!", ppn);
panic!("[KERN] Frame ppn={:#x} has not been allocated!", ppn);
}
// recycle
self.recycled.push(ppn);
@ -83,18 +85,22 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPIntrFreeCell<FrameAllocatorImpl> =
unsafe { UPIntrFreeCell::new(FrameAllocatorImpl::new()) };
pub static ref FRAME_ALLOCATOR: UPIntrFreeCell<FrameAllocatorImpl> = {
kprintln!("[KERN] mm::frame_allocator::lazy_static!FRAME_ALLOCATOR begin");
unsafe { UPIntrFreeCell::new(FrameAllocatorImpl::new()) }
};
}
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
}
kprintln!("[KERN] mm::init_frame_allocator() begin");
FRAME_ALLOCATOR.exclusive_access().init(
PhysAddr::from(ekernel as usize).ceil(),
PhysAddr::from(MEMORY_END).floor(),
);
kprintln!("[KERN] mm::init_frame_allocator() end");
}
pub fn frame_alloc() -> Option<FrameTracker> {

@ -6,17 +6,19 @@ static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
#[alloc_error_handler]
pub fn handle_alloc_error(layout: core::alloc::Layout) -> ! {
panic!("Heap allocation error, layout = {:?}", layout);
panic!("[KERN] Heap allocation error, layout = {:?}", layout);
}
static mut HEAP_SPACE: [u8; KERNEL_HEAP_SIZE] = [0; KERNEL_HEAP_SIZE];
pub fn init_heap() {
kprintln!("[KERN] mm::init_heap() begin");
unsafe {
HEAP_ALLOCATOR
.lock()
.init(HEAP_SPACE.as_ptr() as usize, KERNEL_HEAP_SIZE);
}
kprintln!("[KERN] mm::init_heap() end");
}
#[allow(unused)]

@ -25,8 +25,10 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<UPIntrFreeCell<MemorySet>> =
Arc::new(unsafe { UPIntrFreeCell::new(MemorySet::new_kernel()) });
pub static ref KERNEL_SPACE: Arc<UPIntrFreeCell<MemorySet>> = {
kprintln!("[KERN] mm::memory_set::lazy_static!KERNEL_SPACE begin");
Arc::new(unsafe { UPIntrFreeCell::new(MemorySet::new_kernel()) })
};
}
pub fn kernel_token() -> usize {
@ -88,18 +90,28 @@ impl MemorySet {
}
/// Without kernel stacks.
pub fn new_kernel() -> Self {
kprintln!("[KERN] mm::memory_set::MemorySet::new_kernel() begin");
let mut memory_set = Self::new_bare();
// map trampoline
memory_set.map_trampoline();
// map kernel sections
println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
println!(
".bss [{:#x}, {:#x})",
"[KERN] .text [{:#x}, {:#x})",
stext as usize, etext as usize
);
println!(
"[KERN] .rodata [{:#x}, {:#x})",
srodata as usize, erodata as usize
);
println!(
"[KERN] .data [{:#x}, {:#x})",
sdata as usize, edata as usize
);
println!(
"[KERN] .bss [{:#x}, {:#x})",
sbss_with_stack as usize, ebss as usize
);
println!("mapping .text section");
println!("[KERN] mapping .text section");
memory_set.push(
MapArea::new(
(stext as usize).into(),
@ -109,7 +121,7 @@ impl MemorySet {
),
None,
);
println!("mapping .rodata section");
println!("[KERN] mapping .rodata section");
memory_set.push(
MapArea::new(
(srodata as usize).into(),
@ -119,7 +131,7 @@ impl MemorySet {
),
None,
);
println!("mapping .data section");
println!("[KERN] mapping .data section");
memory_set.push(
MapArea::new(
(sdata as usize).into(),
@ -129,7 +141,7 @@ impl MemorySet {
),
None,
);
println!("mapping .bss section");
println!("[KERN] mapping .bss section");
memory_set.push(
MapArea::new(
(sbss_with_stack as usize).into(),
@ -139,7 +151,7 @@ impl MemorySet {
),
None,
);
println!("mapping physical memory");
println!("[KERN] mapping physical memory");
memory_set.push(
MapArea::new(
(ekernel as usize).into(),
@ -149,7 +161,7 @@ impl MemorySet {
),
None,
);
println!("mapping memory-mapped registers");
println!("[KERN] mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(
MapArea::new(
@ -161,11 +173,13 @@ impl MemorySet {
None,
);
}
kprintln!("[KERN] mm::memory_set::MemorySet::new_kernel() end");
memory_set
}
/// Include sections in elf and trampoline,
/// also returns user_sp_base and entry point.
pub fn from_elf(elf_data: &[u8]) -> (Self, usize, usize) {
kprintln!("[KERN] mm::memory_set::MemorySet::from_elf() begin");
let mut memory_set = Self::new_bare();
// map trampoline
memory_set.map_trampoline();
@ -203,6 +217,7 @@ impl MemorySet {
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
kprintln!("[KERN] mm::memory_set::MemorySet::from_elf() end");
(
memory_set,
user_stack_base,
@ -210,6 +225,7 @@ impl MemorySet {
)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
kprintln!("[KERN] mm::memory_set::MemorySet::from_existed_user() begin");
let mut memory_set = Self::new_bare();
// map trampoline
memory_set.map_trampoline();
@ -226,14 +242,17 @@ impl MemorySet {
.copy_from_slice(src_ppn.get_bytes_array());
}
}
kprintln!("[KERN] mm::memory_set::MemorySet::from_existed_user() end");
memory_set
}
/// Switch the MMU to this address space: install its root page-table
/// token into `satp`, then issue `sfence.vma` so stale TLB entries are
/// flushed.
pub fn activate(&self) {
    kprintln!("[KERN] mm::memory_set::MemorySet::activate() begin");
    let satp = self.page_table.token();
    unsafe {
        // SAFETY: `satp` is the valid root page-table token of this
        // address space; the fence invalidates now-stale translations.
        satp::write(satp);
        asm!("sfence.vma");
    }
    // Fixed: this trace line previously said "begin" a second time.
    kprintln!("[KERN] mm::memory_set::MemorySet::activate() end");
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.page_table.translate(vpn)
@ -258,8 +277,14 @@ impl MapArea {
map_type: MapType,
map_perm: MapPermission,
) -> Self {
kprintln!("[KERN] mm::memory_set::MapArea::new() begin");
let start_vpn: VirtPageNum = start_va.floor();
let end_vpn: VirtPageNum = end_va.ceil();
kprintln!(
"[KERN] mm::memory_set::MapArea::new(start_vpn: {:?}, end_vpn: {:?}) end",
start_vpn,
end_vpn
);
Self {
vpn_range: VPNRange::new(start_vpn, end_vpn),
data_frames: BTreeMap::new(),
@ -297,18 +322,23 @@ impl MapArea {
page_table.unmap(vpn);
}
pub fn map(&mut self, page_table: &mut PageTable) {
kprintln!("[KERN] mm::memory_set::MapArea::map() begin");
for vpn in self.vpn_range {
self.map_one(page_table, vpn);
}
kprintln!("[KERN] mm::memory_set::MapArea::map() end");
}
pub fn unmap(&mut self, page_table: &mut PageTable) {
kprintln!("[KERN] mm::memory_set::MapArea::unmap() begin");
for vpn in self.vpn_range {
self.unmap_one(page_table, vpn);
}
kprintln!("[KERN] mm::memory_set::MapArea::unmap() end");
}
/// data: start-aligned but maybe with shorter length
/// assume that all frames were cleared before
pub fn copy_data(&mut self, page_table: &mut PageTable, data: &[u8]) {
kprintln!("[KERN] mm::memory_set::MapArea::copy_data() begin");
assert_eq!(self.map_type, MapType::Framed);
let mut start: usize = 0;
let mut current_vpn = self.vpn_range.get_start();
@ -327,6 +357,7 @@ impl MapArea {
}
current_vpn.step();
}
kprintln!("[KERN] mm::memory_set::MapArea::copy_data() end");
}
}

@ -16,7 +16,9 @@ pub use page_table::{
};
pub fn init() {
kprintln!("[KERN] mm::init() begin");
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();
KERNEL_SPACE.exclusive_access().activate();
kprintln!("[KERN] mm::init() end");
}

@ -56,8 +56,10 @@ pub struct IntrMaskingInfo {
}
lazy_static! {
static ref INTR_MASKING_INFO: UPSafeCellRaw<IntrMaskingInfo> =
unsafe { UPSafeCellRaw::new(IntrMaskingInfo::new()) };
static ref INTR_MASKING_INFO: UPSafeCellRaw<IntrMaskingInfo> = unsafe {
kprintln!("[KERN] sync::up::lazy_static!INTR_MASKING_INFO begin");
UPSafeCellRaw::new(IntrMaskingInfo::new())
};
}
impl IntrMaskingInfo {

@ -4,6 +4,9 @@ use crate::task::{current_process, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
if fd != 1 && fd != 2 {
kprintln!("[KERN] syscall::fs::sys_write(fd: {}) begin", fd);
}
let token = current_user_token();
let process = current_process();
let inner = process.inner_exclusive_access();
@ -24,6 +27,9 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
}
pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
if fd != 0 {
kprintln!("[KERN] syscall::fs::sys_read(fd: {}) begin", fd);
}
let token = current_user_token();
let process = current_process();
let inner = process.inner_exclusive_access();
@ -44,6 +50,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
pub fn sys_open(path: *const u8, flags: u32) -> isize {
kprintln!("[KERN] syscall::fs::sys_open() begin");
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
@ -51,6 +58,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);
kprintln!("[KERN] syscall::fs::sys_open() return fd {}, end", fd);
fd as isize
} else {
-1
@ -58,6 +66,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
}
pub fn sys_close(fd: usize) -> isize {
kprintln!("[KERN] syscall::fs::sys_close(fd: {}) begin", fd);
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {
@ -71,6 +80,7 @@ pub fn sys_close(fd: usize) -> isize {
}
pub fn sys_pipe(pipe: *mut usize) -> isize {
kprintln!("[KERN] syscall::fs::sys_pipe() begin");
let process = current_process();
let token = current_user_token();
let mut inner = process.inner_exclusive_access();
@ -85,6 +95,7 @@ pub fn sys_pipe(pipe: *mut usize) -> isize {
}
pub fn sys_dup(fd: usize) -> isize {
kprintln!("[KERN] syscall::fs::sys_dup(fd: {}) begin", fd);
let process = current_process();
let mut inner = process.inner_exclusive_access();
if fd >= inner.fd_table.len() {

@ -37,6 +37,16 @@ use sync::*;
use thread::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
if syscall_id != SYSCALL_YIELD
&& syscall_id != SYSCALL_WAITPID
&& !(syscall_id == SYSCALL_READ && args[0] == 0)
&& !(syscall_id == SYSCALL_WRITE && (args[0] == 1 || args[0] == 2))
{
kprintln!(
"[KERN] syscall::syscall(id: {}) begin",
sys_id_str(syscall_id)
);
}
match syscall_id {
SYSCALL_DUP => sys_dup(args[0]),
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
@ -68,3 +78,36 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}
/// Map a raw syscall id to a human-readable name for kernel trace output.
/// Unknown ids map to a sentinel string instead of panicking, because this
/// helper exists purely for logging in `syscall()`.
pub fn sys_id_str(syscall_id: usize) -> &'static str {
    match syscall_id {
        SYSCALL_DUP => "sys_dup",
        SYSCALL_OPEN => "sys_open",
        SYSCALL_CLOSE => "sys_close",
        SYSCALL_PIPE => "sys_pipe",
        SYSCALL_READ => "sys_read",
        SYSCALL_WRITE => "sys_write",
        SYSCALL_EXIT => "sys_exit",
        SYSCALL_SLEEP => "sys_sleep",
        SYSCALL_YIELD => "sys_yield",
        SYSCALL_KILL => "sys_kill",
        SYSCALL_GET_TIME => "sys_get_time",
        SYSCALL_GETPID => "sys_getpid",
        SYSCALL_FORK => "sys_fork",
        SYSCALL_EXEC => "sys_exec",
        SYSCALL_WAITPID => "sys_waitpid",
        SYSCALL_THREAD_CREATE => "sys_thread_create",
        SYSCALL_GETTID => "sys_gettid",
        SYSCALL_WAITTID => "sys_waittid",
        SYSCALL_MUTEX_CREATE => "sys_mutex_create",
        SYSCALL_MUTEX_LOCK => "sys_mutex_lock",
        SYSCALL_MUTEX_UNLOCK => "sys_mutex_unlock",
        SYSCALL_SEMAPHORE_CREATE => "sys_semaphore_create",
        SYSCALL_SEMAPHORE_UP => "sys_semaphore_up",
        SYSCALL_SEMAPHORE_DOWN => "sys_semaphore_down",
        SYSCALL_CONDVAR_CREATE => "sys_condvar_create",
        SYSCALL_CONDVAR_SIGNAL => "sys_condvar_signal",
        SYSCALL_CONDVAR_WAIT => "sys_condvar_wait",
        _ => "Unsupported syscall_id",
    }
}

@ -10,6 +10,7 @@ use alloc::sync::Arc;
use alloc::vec::Vec;
/// Terminate the current task with `exit_code`. Never returns: control
/// passes to the scheduler inside `exit_current_and_run_next`.
pub fn sys_exit(exit_code: i32) -> ! {
    kprintln!("[KERN] syscall::process::sys_exit begin");
    exit_current_and_run_next(exit_code);
    // Only reachable if the scheduler ever handed control back — a bug.
    panic!("Unreachable in sys_exit!");
}
@ -20,14 +21,17 @@ pub fn sys_yield() -> isize {
}
/// Return the current time in milliseconds, as reported by `get_time_ms`.
pub fn sys_get_time() -> isize {
    kprintln!("[KERN] syscall::process::sys_get_time begin");
    get_time_ms() as isize
}
/// Return the pid of the current process, reached through the current
/// task's (weak) back-reference to its process control block.
pub fn sys_getpid() -> isize {
    kprintln!("[KERN] syscall::process::sys_getpid begin");
    current_task().unwrap().process.upgrade().unwrap().getpid() as isize
}
pub fn sys_fork() -> isize {
kprintln!("[KERN] syscall::process::sys_fork begin");
let current_process = current_process();
let new_process = current_process.fork();
let new_pid = new_process.getpid();
@ -38,10 +42,12 @@ pub fn sys_fork() -> isize {
// we do not have to move to next instruction since we have done it before
// for child process, fork returns 0
trap_cx.x[10] = 0;
kprintln!("[KERN] syscall::process::sys_fork end");
new_pid as isize
}
pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
kprintln!("[KERN] syscall::process::sys_exec begin");
let token = current_user_token();
let path = translated_str(token, path);
let mut args_vec: Vec<String> = Vec::new();
@ -60,6 +66,7 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
let process = current_process();
let argc = args_vec.len();
process.exec(all_data.as_slice(), args_vec);
kprintln!("[KERN] syscall::process::sys_exec end");
// return argc because cx.x[10] will be covered with it later
argc as isize
} else {
@ -70,6 +77,7 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
/// If there is not a child process whose pid is same as given, return -1.
/// Else if there is a child process but it is still running, return -2.
pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
//kprintln!("[KERN] syscall::process::sys_waitpid begin");
let process = current_process();
// find a child process
@ -88,14 +96,21 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
kprintln!(
"[KERN] syscall::process::sys_waitpid(): remove child from PCB's children Vector"
);
let child = inner.children.remove(idx);
// confirm that child will be deallocated after being removed from children list
assert_eq!(Arc::strong_count(&child), 1);
let found_pid = child.getpid();
// ++++ temporarily access child PCB exclusively
kprintln!(
"[KERN] syscall::process::sys_waitpid(): get child's exit_code and return child pid"
);
let exit_code = child.inner_exclusive_access().exit_code;
// ++++ release child PCB
*translated_refmut(inner.memory_set.token(), exit_code_ptr) = exit_code;
kprintln!("[KERN] syscall::process::sys_waitpid(): release child PCB");
found_pid as isize
} else {
-2
@ -104,6 +119,7 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
}
pub fn sys_kill(pid: usize, signal: u32) -> isize {
kprintln!("[KERN] syscall::process::sys_kill begin");
if let Some(process) = pid2process(pid) {
if let Some(flag) = SignalFlags::from_bits(signal) {
process.inner_exclusive_access().signals |= flag;

@ -4,14 +4,17 @@ use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
/// Block the calling thread for at least `ms` milliseconds: register a
/// wake-up timer at the absolute deadline, then yield the CPU until the
/// timer fires. Always returns 0.
pub fn sys_sleep(ms: usize) -> isize {
    kprintln!("[KERN] syscall::sync::sys_sleep begin");
    let expire_ms = get_time_ms() + ms;
    let task = current_task().unwrap();
    add_timer(expire_ms, task);
    block_current_and_run_next();
    kprintln!("[KERN] syscall::sync::sys_sleep end");
    0
}
pub fn sys_mutex_create(blocking: bool) -> isize {
kprintln!("[KERN] syscall::sync::sys_mutex_create begin");
let process = current_process();
let mutex: Option<Arc<dyn Mutex>> = if !blocking {
Some(Arc::new(MutexSpin::new()))
@ -35,6 +38,7 @@ pub fn sys_mutex_create(blocking: bool) -> isize {
}
pub fn sys_mutex_lock(mutex_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_mutex_lock begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
@ -45,6 +49,7 @@ pub fn sys_mutex_lock(mutex_id: usize) -> isize {
}
pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_mutex_unlock begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let mutex = Arc::clone(process_inner.mutex_list[mutex_id].as_ref().unwrap());
@ -55,6 +60,7 @@ pub fn sys_mutex_unlock(mutex_id: usize) -> isize {
}
pub fn sys_semaphore_create(res_count: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_semaphore_create begin");
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
@ -76,6 +82,7 @@ pub fn sys_semaphore_create(res_count: usize) -> isize {
}
pub fn sys_semaphore_up(sem_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_semaphore_up begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
@ -85,6 +92,7 @@ pub fn sys_semaphore_up(sem_id: usize) -> isize {
}
pub fn sys_semaphore_down(sem_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_semaphore_down begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let sem = Arc::clone(process_inner.semaphore_list[sem_id].as_ref().unwrap());
@ -94,6 +102,7 @@ pub fn sys_semaphore_down(sem_id: usize) -> isize {
}
pub fn sys_condvar_create(_arg: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_condvar_create begin");
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
let id = if let Some(id) = process_inner
@ -115,6 +124,7 @@ pub fn sys_condvar_create(_arg: usize) -> isize {
}
pub fn sys_condvar_signal(condvar_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_condvar_signal begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());
@ -124,6 +134,7 @@ pub fn sys_condvar_signal(condvar_id: usize) -> isize {
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
kprintln!("[KERN] syscall::sync::sys_condvar_wait begin");
let process = current_process();
let process_inner = process.inner_exclusive_access();
let condvar = Arc::clone(process_inner.condvar_list[condvar_id].as_ref().unwrap());

@ -6,6 +6,7 @@ use crate::{
use alloc::sync::Arc;
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
kprintln!("[KERN] syscall::thread::sys_thread_create begin");
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
// create a new thread
@ -39,10 +40,12 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
trap_handler as usize,
);
(*new_task_trap_cx).x[10] = arg;
kprintln!("[KERN] syscall::thread::sys_thread_create end");
new_task_tid as isize
}
pub fn sys_gettid() -> isize {
kprintln!("[KERN] syscall::thread::sys_gettid begin");
current_task()
.unwrap()
.inner_exclusive_access()
@ -56,6 +59,7 @@ pub fn sys_gettid() -> isize {
/// thread has not exited yet, return -2
/// otherwise, return thread's exit code
pub fn sys_waittid(tid: usize) -> i32 {
kprintln!("[KERN] syscall::thread::sys_waittid begin");
let task = current_task().unwrap();
let process = task.process.upgrade().unwrap();
let task_inner = task.inner_exclusive_access();

@ -40,10 +40,17 @@ impl RecycleAllocator {
}
lazy_static! {
static ref PID_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> =
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) };
static ref PID_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> = {
kprintln!("[KERN] task::id::lazy_static!PID_ALLOCATOR begin");
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) }
};
}
lazy_static! {
static ref KSTACK_ALLOCATOR: UPIntrFreeCell<RecycleAllocator> = {
kprintln!("[KERN] task::id::lazy_static!KSTACK_ALLOCATOR begin");
unsafe { UPIntrFreeCell::new(RecycleAllocator::new()) }
};
}
pub const IDLE_PID: usize = 0;
@ -56,20 +63,25 @@ pub fn pid_alloc() -> PidHandle {
impl Drop for PidHandle {
fn drop(&mut self) {
kprintln!("[KERN] task::id::Drop<PidHandle>::drop begin");
PID_ALLOCATOR.exclusive_access().dealloc(self.0);
kprintln!("[KERN] task::id::Drop<PidHandle>::drop end");
}
}
/// Return (bottom, top) of a kernel stack in kernel space.
///
/// Stacks are laid out downward from just below the trampoline page; each
/// slot reserves one stack plus one unmapped guard page.
pub fn kernel_stack_position(kstack_id: usize) -> (usize, usize) {
    kprintln!("[KERN] task::id::kernel_stack_position() begin");
    let slot = KERNEL_STACK_SIZE + PAGE_SIZE;
    let top = TRAMPOLINE - kstack_id * slot;
    kprintln!("[KERN] task::id::kernel_stack_position() end");
    (top - KERNEL_STACK_SIZE, top)
}
pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
kprintln!("[KERN] task::id::kstack_alloc() begin");
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE.exclusive_access().insert_framed_area(
@ -77,16 +89,19 @@ pub fn kstack_alloc() -> KernelStack {
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
kprintln!("[KERN] task::id::kstack_alloc() end");
KernelStack(kstack_id)
}
impl Drop for KernelStack {
fn drop(&mut self) {
kprintln!("[KERN] task::id::Drop<KernelStack>::drop begin");
let (kernel_stack_bottom, _) = kernel_stack_position(self.0);
let kernel_stack_bottom_va: VirtAddr = kernel_stack_bottom.into();
KERNEL_SPACE
.exclusive_access()
.remove_area_with_start_vpn(kernel_stack_bottom_va.into());
kprintln!("[KERN] task::id::Drop<KernelStack>::drop end");
}
}
@ -129,6 +144,7 @@ impl TaskUserRes {
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
kprintln!("[KERN] task::id::TaskUserRes::new() begin");
let tid = process.inner_exclusive_access().alloc_tid();
let task_user_res = Self {
tid,
@ -138,13 +154,16 @@ impl TaskUserRes {
if alloc_user_res {
task_user_res.alloc_user_res();
}
kprintln!("[KERN] task::id::TaskUserRes::new() end");
task_user_res
}
pub fn alloc_user_res(&self) {
kprintln!("[KERN] task::id::TaskUserRes::alloc_user_res() begin");
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// alloc user stack
kprintln!("[KERN] task::id::TaskUserRes::alloc_user_res(): alloc user stack for TCB");
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner.memory_set.insert_framed_area(
@ -153,6 +172,7 @@ impl TaskUserRes {
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
kprintln!("[KERN] task::id::TaskUserRes::alloc_user_res(): alloc trap_cx for TCB");
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner.memory_set.insert_framed_area(
@ -160,22 +180,29 @@ impl TaskUserRes {
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
kprintln!("[KERN] task::id::TaskUserRes::alloc_user_res() end");
}
fn dealloc_user_res(&self) {
kprintln!("[KERN] task::id::TaskUserRes::dealloc_user_res() begin");
// dealloc tid
kprintln!("[KERN] task::id::TaskUserRes::dealloc_user_res(): dealloc tid");
let process = self.process.upgrade().unwrap();
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
kprintln!("[KERN] task::id::TaskUserRes::dealloc_user_res(): dealloc ustack manually");
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
kprintln!("[KERN] task::id::TaskUserRes::dealloc_user_res(): dealloc trap_cx manually");
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
kprintln!("[KERN] task::id::TaskUserRes::dealloc_user_res() end");
}
#[allow(unused)]
@ -219,7 +246,9 @@ impl TaskUserRes {
impl Drop for TaskUserRes {
fn drop(&mut self) {
kprintln!("[KERN] task::id::Drop<TaskUserRes>::drop begin");
self.dealloc_tid();
self.dealloc_user_res();
kprintln!("[KERN] task::id::Drop<TaskUserRes>::drop end");
}
}

@ -24,17 +24,27 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: UPIntrFreeCell<TaskManager> =
unsafe { UPIntrFreeCell::new(TaskManager::new()) };
pub static ref PID2PCB: UPIntrFreeCell<BTreeMap<usize, Arc<ProcessControlBlock>>> =
unsafe { UPIntrFreeCell::new(BTreeMap::new()) };
pub static ref TASK_MANAGER: UPIntrFreeCell<TaskManager> = {
kprintln!("[KERN] task::manager::lazy_static!TASK_MANAGER begin");
unsafe { UPIntrFreeCell::new(TaskManager::new()) }
};
}
lazy_static! {
pub static ref PID2PCB: UPIntrFreeCell<BTreeMap<usize, Arc<ProcessControlBlock>>> = {
kprintln!("[KERN] task::manager::lazy_static!PID2PCB begin");
unsafe { UPIntrFreeCell::new(BTreeMap::new()) }
};
}
/// Hand a task to the global TASK_MANAGER so the scheduler can pick it up.
pub fn add_task(task: Arc<TaskControlBlock>) {
    // Trace logging disabled here: this runs on every scheduling round trip.
    //kprintln!("[KERN] task::manager::add_task() begin");
    TASK_MANAGER.exclusive_access().add(task);
    //kprintln!("[KERN] task::manager::add_task() end");
}
/// Pop the next ready task from the global scheduler queue, if any.
pub fn fetch_task() -> Option<Arc<TaskControlBlock>> {
    let mut manager = TASK_MANAGER.exclusive_access();
    manager.fetch()
}

@ -28,6 +28,7 @@ pub use task::{TaskControlBlock, TaskStatus};
pub fn suspend_current_and_run_next() {
// There must be an application running.
//kprintln!("[KERN] task::suspend_current_and_run_next() begin");
let task = take_current_task().unwrap();
// ---- access current TCB exclusively
@ -41,14 +42,17 @@ pub fn suspend_current_and_run_next() {
// push back to ready queue.
add_task(task);
// jump to scheduling cycle
//kprintln!("[KERN] task::suspend_current_and_run_next() end");
schedule(task_cx_ptr);
}
/// Takes the current task off the processor, marks it `Blocking`, and
/// returns a raw pointer to its saved task context.
///
/// This function must be followed by a schedule: unlike suspend, the task
/// is NOT pushed back onto the ready queue, so it stays blocked until some
/// other path wakes it.
pub fn block_current_task() -> *mut TaskContext {
    //kprintln!("[KERN] task::block_current_task() begin");
    let task = take_current_task().unwrap();
    let mut task_inner = task.inner_exclusive_access();
    task_inner.task_status = TaskStatus::Blocking;
    //kprintln!("[KERN] task::block_current_task() end");
    // The inner guard is released when this function returns; the raw
    // pointer deliberately outlives it so `schedule` can save into it.
    &mut task_inner.task_cx as *mut TaskContext
}
@ -60,20 +64,26 @@ pub fn block_current_and_run_next() {
use crate::board::QEMUExit;
pub fn exit_current_and_run_next(exit_code: i32) {
kprintln!("[KERN] task::exit_current_and_run_next() begin");
let task = take_current_task().unwrap();
let mut task_inner = task.inner_exclusive_access();
let process = task.process.upgrade().unwrap();
let tid = task_inner.res.as_ref().unwrap().tid;
// record exit code
kprintln!("[KERN] task::exit_current_and_run_next(): record exit code in task_inner");
task_inner.exit_code = Some(exit_code);
kprintln!("[KERN] task::exit_current_and_run_next(): TaskUserRes =>None");
task_inner.res = None;
// here we do not remove the thread since we are still using the kstack
// it will be deallocated when sys_waittid is called
kprintln!("[KERN] task::exit_current_and_run_next(): drop task_inner");
drop(task_inner);
kprintln!("[KERN] task::exit_current_and_run_next(): drop task");
drop(task);
// however, if this is the main thread of current process
// the process should terminate at once
if tid == 0 {
kprintln!("[KERN] task::exit_current_and_run_next(): it's main thread, process should terminate at once");
let pid = process.getpid();
if pid == IDLE_PID {
println!(
@ -91,12 +101,19 @@ pub fn exit_current_and_run_next(exit_code: i32) {
remove_from_pid2process(pid);
let mut process_inner = process.inner_exclusive_access();
// mark this process as a zombie process
kprintln!(
"[KERN] task::exit_current_and_run_next(): mark this process as a zombie process"
);
process_inner.is_zombie = true;
// record exit code of main process
kprintln!("[KERN] task::exit_current_and_run_next(): record exit code in process_inner");
process_inner.exit_code = exit_code;
{
// move all child processes under init process
kprintln!(
"[KERN] task::exit_current_and_run_next(): move all child processes under INITPROC"
);
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
@ -107,6 +124,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
// deallocate user res (including tid/trap_cx/ustack) of all threads
// it has to be done before we dealloc the whole memory_set
// otherwise they will be deallocated twice
kprintln!("[KERN] task::exit_current_and_run_next(): deallocate user res (tid/trap_cx/ustack) of all threads");
let mut recycle_res = Vec::<TaskUserRes>::new();
for task in process_inner.tasks.iter().filter(|t| t.is_some()) {
let task = task.as_ref().unwrap();
@ -115,6 +133,10 @@ pub fn exit_current_and_run_next(exit_code: i32) {
recycle_res.push(res);
}
}
kprintln!(
"[KERN] task::exit_current_and_run_next(): clear children Vector in process_inner"
);
// dealloc_tid and dealloc_user_res require access to PCB inner, so we
// need to collect those user res first, then release process_inner
// for now to avoid deadlock/double borrow problem.
@ -124,18 +146,23 @@ pub fn exit_current_and_run_next(exit_code: i32) {
let mut process_inner = process.inner_exclusive_access();
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
kprintln!("[KERN] task::exit_current_and_run_next(): deallocate code/data in user space");
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
kprintln!("[KERN] task::exit_current_and_run_next(): drop file descriptors");
process_inner.fd_table.clear();
}
kprintln!("[KERN] task::exit_current_and_run_next(): drop process");
drop(process);
// we do not have to save task context
let mut _unused = TaskContext::zero_init();
kprintln!("[KERN] task::exit_current_and_run_next() end, sched next task");
schedule(&mut _unused as *mut _);
}
lazy_static! {
pub static ref INITPROC: Arc<ProcessControlBlock> = {
kprintln!("[KERN] task::lazy_static!INITPROC begin");
let inode = open_file("initproc", OpenFlags::RDONLY).unwrap();
let v = inode.read_all();
ProcessControlBlock::new(v.as_slice())
@ -143,7 +170,9 @@ lazy_static! {
}
/// Force construction of the lazily-initialized `INITPROC` process.
/// The extra `Arc` handle taken here is discarded immediately; creating
/// the PCB is the side effect we want.
pub fn add_initproc() {
    kprintln!("[KERN] task::add_initproc() begin");
    drop(INITPROC.clone());
    kprintln!("[KERN] task::add_initproc() end");
}
pub fn check_signals_of_current() -> Option<(i32, &'static str)> {

@ -72,10 +72,14 @@ impl ProcessControlBlock {
}
pub fn new(elf_data: &[u8]) -> Arc<Self> {
// memory_set with elf program headers/trampoline/trap context/user stack
// memory_set with elf program headers/trampoline/user stack_base addr/entry_point
kprintln!("[KERN] task::process::PCB::new() begin");
kprintln!("[KERN] task::process::PCB::new(): build MemorySet, set trampoline, user_stack_base, entry_point...");
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
// allocate a pid
kprintln!("[KERN] task::process::PCB::new(): allocate a pid");
let pid_handle = pid_alloc();
kprintln!("[KERN] task::process::PCB::new(): new ProcessControlBlockInner");
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe {
@ -103,12 +107,15 @@ impl ProcessControlBlock {
},
});
// create a main thread, we should allocate ustack and trap_cx here
kprintln!("[KERN] task::process::PCB::new(): create a main thread... start");
kprintln!("[KERN] task::process::PCB::new(): create a main thread: new TCB(alloc kstack, utack & trap_cx...) ");
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
ustack_base,
true,
));
// prepare trap_cx of main thread
kprintln!("[KERN] task::process::PCB::new(): create a main thread: set trap_cx(entry_point, ustack_top, k_satp, k_sp, trap_handler) ");
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
let ustack_top = task_inner.res.as_ref().unwrap().ustack_top();
@ -121,32 +128,45 @@ impl ProcessControlBlock {
kstack_top,
trap_handler as usize,
);
kprintln!("[KERN] task::process::PCB::new(): create a main thread... done");
// add main thread to the process
kprintln!("[KERN] task::process::PCB::new(): add main thread to the process");
let mut process_inner = process.inner_exclusive_access();
process_inner.tasks.push(Some(Arc::clone(&task)));
drop(process_inner);
kprintln!("[KERN] task::process::PCB::new(): insert <pid, PCB> in PID2PCB BTreeMap");
insert_into_pid2process(process.getpid(), Arc::clone(&process));
// add main thread to scheduler
kprintln!("[KERN] task::process::PCB::new(): add_task(task): add main thread to scheduler");
add_task(task);
kprintln!("[KERN] task::process::PCB::new() end");
process
}
/// Only support processes with a single thread.
pub fn exec(self: &Arc<Self>, elf_data: &[u8], args: Vec<String>) {
kprintln!("[KERN] task::process::PCB::exec() begin");
assert_eq!(self.inner_exclusive_access().thread_count(), 1);
// memory_set with elf program headers/trampoline/trap context/user stack
// memory_set with elf program headers/trampoline/user_stack_base addr/entry_point
kprintln!("[KERN] task::process::PCB::exec(): build MemorySet, trampoline, user_stack_base, entry_point...");
let (memory_set, ustack_base, entry_point) = MemorySet::from_elf(elf_data);
let new_token = memory_set.token();
// substitute memory_set
kprintln!("[KERN] task::process::PCB::exec(): substitute memory_set, ustack_base");
self.inner_exclusive_access().memory_set = memory_set;
// then we alloc user resource for main thread again
// since memory_set has been changed
let task = self.inner_exclusive_access().get_task(0);
let mut task_inner = task.inner_exclusive_access();
task_inner.res.as_mut().unwrap().ustack_base = ustack_base;
kprintln!("[KERN] task::process::PCB::exec(): alloc user resource for this thread");
task_inner.res.as_mut().unwrap().alloc_user_res();
kprintln!("[KERN] task::process::PCB::exec(): set trap_cx_ppn for this thread");
task_inner.trap_cx_ppn = task_inner.res.as_mut().unwrap().trap_cx_ppn();
// push arguments on user stack
kprintln!(
"[KERN] task::process::PCB::exec(): push arguments on user stack for this thread"
);
let mut user_sp = task_inner.res.as_mut().unwrap().ustack_top();
user_sp -= (args.len() + 1) * core::mem::size_of::<usize>();
let argv_base = user_sp;
@ -172,6 +192,7 @@ impl ProcessControlBlock {
// make the user_sp aligned to 8B for k210 platform
user_sp -= user_sp % core::mem::size_of::<usize>();
// initialize trap_cx
kprintln!("[KERN] task::process::PCB::exec(): set trap_cx(entry_point, ustack_top, k_satp, k_sp, trap_handler, argc=x[10], argv=x[11])");
let mut trap_cx = TrapContext::app_init_context(
entry_point,
user_sp,
@ -182,17 +203,22 @@ impl ProcessControlBlock {
trap_cx.x[10] = args.len();
trap_cx.x[11] = argv_base;
*task_inner.get_trap_cx() = trap_cx;
kprintln!("[KERN] task::process::PCB::exec() end");
}
/// Only support processes with a single thread.
pub fn fork(self: &Arc<Self>) -> Arc<Self> {
kprintln!("[KERN] task::process::PCB::fork() begin");
let mut parent = self.inner_exclusive_access();
assert_eq!(parent.thread_count(), 1);
// clone parent's memory_set completely including trampoline/ustacks/trap_cxs
kprintln!("[KERN] task::process::PCB::fork(): clone parent's memory_set for child");
let memory_set = MemorySet::from_existed_user(&parent.memory_set);
// alloc a pid
kprintln!("[KERN] task::process::PCB::fork(): alloc a new pid for child");
let pid = pid_alloc();
// copy fd table
kprintln!("[KERN] task::process::PCB::fork(): copy fd table for child");
let mut new_fd_table: Vec<Option<Arc<dyn File + Send + Sync>>> = Vec::new();
for fd in parent.fd_table.iter() {
if let Some(file) = fd {
@ -202,6 +228,7 @@ impl ProcessControlBlock {
}
}
// create child process pcb
kprintln!("[KERN] task::process::PCB::fork(): new child PCB with new pid, memory_set, fd_table, ...");
let child = Arc::new(Self {
pid,
inner: unsafe {
@ -222,8 +249,10 @@ impl ProcessControlBlock {
},
});
// add child
kprintln!("[KERN] task::process::PCB::fork(): add child link in parent' children Vec");
parent.children.push(Arc::clone(&child));
// create main thread of child process
kprintln!("[KERN] task::process::PCB::fork(): TaskControlBlock::new(): create main thread of child process");
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent
@ -238,17 +267,26 @@ impl ProcessControlBlock {
false,
));
// attach task to child process
kprintln!("[KERN] task::process::PCB::fork(): attach child TCB to child PCB");
let mut child_inner = child.inner_exclusive_access();
child_inner.tasks.push(Some(Arc::clone(&task)));
drop(child_inner);
// modify kstack_top in trap_cx of this thread
kprintln!(
"[KERN] task::process::PCB::fork(): modify child's kstack_top in trap_cx of child"
);
let task_inner = task.inner_exclusive_access();
let trap_cx = task_inner.get_trap_cx();
trap_cx.kernel_sp = task.kstack.get_top();
drop(task_inner);
kprintln!(
"[KERN] task::process::PCB::fork(): insert <child pid, child PCB> in PID2PCB BTreeMap"
);
insert_into_pid2process(child.getpid(), Arc::clone(&child));
// add this thread to scheduler
kprintln!("[KERN] task::process::PCB::fork(): add_task(child task): add child thread to scheduler");
add_task(task);
kprintln!("[KERN] task::process::PCB::fork() end");
child
}

@ -30,11 +30,14 @@ impl Processor {
}
lazy_static! {
    /// Per-hart processor state (currently a single global instance).
    ///
    /// The flattened diff left both the old (silent) and new (logging)
    /// `PROCESSOR` definitions in the same block — a duplicate static that
    /// cannot compile; only the logging variant is kept.
    pub static ref PROCESSOR: UPIntrFreeCell<Processor> = {
        kprintln!("[KERN] task::processor::lazy_static!PROCESSOR begin");
        unsafe { UPIntrFreeCell::new(Processor::new()) }
    };
}
pub fn run_tasks() {
kprintln!("[KERN] task::processor::run_tasks() begin");
loop {
let mut processor = PROCESSOR.exclusive_access();
if let Some(task) = fetch_task() {

@ -52,10 +52,13 @@ impl TaskControlBlock {
ustack_base: usize,
alloc_user_res: bool,
) -> Self {
kprintln!("[KERN] task::task::TaskControlBlock::new() begin");
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
kprintln!("[KERN] task::task::TaskControlBlock::new(): alloc kernel stack for TCB");
let kstack = kstack_alloc();
let kstack_top = kstack.get_top();
kprintln!("[KERN] task::task::TaskControlBlock::new() end");
Self {
process: Arc::downgrade(&process),
kstack,

@ -21,7 +21,9 @@ pub fn get_time_ms() -> usize {
}
/// Program the hardware timer to fire one scheduling tick from now.
pub fn set_next_trigger() {
    let ticks_interval = CLOCK_FREQ / TICKS_PER_SEC;
    set_timer(get_time() + ticks_interval);
}
pub struct TimerCondVar {
@ -50,8 +52,10 @@ impl Ord for TimerCondVar {
}
lazy_static! {
    /// Min-heap (by expiry) of pending timer condvars for `sleep` wakeups.
    ///
    /// The flattened diff left both the old (silent) and new (logging)
    /// `TIMERS` definitions in the same block — a duplicate static that
    /// cannot compile; only the logging variant is kept.
    static ref TIMERS: UPIntrFreeCell<BinaryHeap<TimerCondVar>> = {
        kprintln!("[KERN] timer::lazy_static!TIMERS begin");
        unsafe { UPIntrFreeCell::new(BinaryHeap::<TimerCondVar>::new()) }
    };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {

@ -22,6 +22,7 @@ impl TrapContext {
kernel_sp: usize,
trap_handler: usize,
) -> Self {
kprintln!("[KERN] trap::context::TrapContext::app_init_context begin");
let mut sstatus = sstatus::read();
// set CPU privilege to User after trapping back
sstatus.set_spp(SPP::User);
@ -34,6 +35,7 @@ impl TrapContext {
trap_handler,
};
cx.set_sp(sp);
kprintln!("[KERN] trap::context::TrapContext::app_init_context end");
cx
}
}

@ -17,7 +17,9 @@ use riscv::register::{
global_asm!(include_str!("trap.S"));
/// Initialize trap handling: install the kernel-side trap entry so traps
/// taken while in S-mode are routed correctly. (The user-side entry is
/// installed separately before returning to U-mode.)
pub fn init() {
    kprintln!("[KERN] trap::init() begin");
    set_kernel_trap_entry();
    kprintln!("[KERN] trap::init() end");
}
fn set_kernel_trap_entry() {
@ -39,9 +41,11 @@ fn set_user_trap_entry() {
}
/// Enable delivery of S-mode timer interrupts by setting `sie.STIE`.
pub fn enable_timer_interrupt() {
    kprintln!("[KERN] trap::enable_timer_interrupt() begin");
    // SAFETY: writes the supervisor interrupt-enable CSR only; this has no
    // memory-safety impact, it merely allows timer interrupts to fire.
    unsafe {
        sie::set_stimer();
    }
    kprintln!("[KERN] trap::enable_timer_interrupt() end");
}
fn enable_supervisor_interrupt() {

@ -7,16 +7,18 @@ extern crate user_lib;
extern crate alloc;
extern crate core;
use user_lib::{thread_create, waittid, exit, sleep};
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use user_lib::{exit, sleep, thread_create, waittid};
const N: usize = 2;
const THREAD_NUM: usize = 10;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum FlagState {
Out, Want, In,
Out,
Want,
In,
}
static mut TURN: usize = 0;
@ -25,7 +27,7 @@ static mut FLAG: [FlagState; THREAD_NUM] = [FlagState::Out; THREAD_NUM];
static GUARD: AtomicUsize = AtomicUsize::new(0);
fn critical_test_enter() {
assert_eq!(GUARD.fetch_add(1, Ordering::SeqCst), 0);
assert_eq!(GUARD.fetch_add(1, Ordering::SeqCst), 0);
}
fn critical_test_claim() {
@ -33,7 +35,7 @@ fn critical_test_claim() {
}
fn critical_test_exit() {
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
}
fn eisenberg_enter_critical(id: usize) {
@ -43,7 +45,7 @@ fn eisenberg_enter_critical(id: usize) {
vstore!(&FLAG[id], FlagState::Want);
loop {
/* check if any with higher priority is `Want` or `In` */
let mut prior_thread:Option<usize> = None;
let mut prior_thread: Option<usize> = None;
let turn = vload!(&TURN);
let ring_id = if id < turn { id + THREAD_NUM } else { id };
// FLAG.iter() may lead to some errors, use for-loop instead
@ -56,8 +58,11 @@ fn eisenberg_enter_critical(id: usize) {
if prior_thread.is_none() {
break;
}
println!("Thread[{}]: prior thread {} exist, sleep and retry",
id, prior_thread.unwrap());
println!(
"Thread[{}]: prior thread {} exist, sleep and retry",
id,
prior_thread.unwrap()
);
sleep(1);
}
/* now tentatively claim the resource */
@ -86,7 +91,7 @@ fn eisenberg_exit_critical(id: usize) {
/* find next one who wants to enter and give the turn to it*/
let mut next = id;
let ring_id = id + THREAD_NUM;
for i in (id+1)..ring_id {
for i in (id + 1)..ring_id {
let idx = i % THREAD_NUM;
if vload!(&FLAG[idx]) == FlagState::Want {
next = idx;
@ -119,7 +124,7 @@ pub fn main() -> i32 {
let mut v = Vec::new();
// TODO: really shuffle
assert_eq!(THREAD_NUM, 10);
let shuffle:[usize; 10] = [0, 7, 4, 6, 2, 9, 8, 1, 3, 5];
let shuffle: [usize; 10] = [0, 7, 4, 6, 2, 9, 8, 1, 3, 5];
for i in 0..THREAD_NUM {
v.push(thread_create(thread_fn as usize, shuffle[i]));
}
@ -130,4 +135,4 @@ pub fn main() -> i32 {
}
println!("main thread exited.");
0
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{exit, fork, getpid, sleep, yield_, wait};
use user_lib::{exit, fork, getpid, sleep, wait, yield_};
const DEPTH: usize = 4;
@ -28,7 +28,7 @@ fn fork_tree(cur: &str) {
fork_child(cur, '0');
fork_child(cur, '1');
let mut exit_code: i32 = 0;
for _ in 0..2{
for _ in 0..2 {
wait(&mut exit_code);
}
}
@ -37,7 +37,7 @@ fn fork_tree(cur: &str) {
pub fn main() -> i32 {
fork_tree("");
let mut exit_code: i32 = 0;
for _ in 0..2{
for _ in 0..2 {
wait(&mut exit_code);
}
sleep(3000);

@ -8,9 +8,9 @@ extern crate user_lib;
extern crate alloc;
extern crate core;
use user_lib::{thread_create, waittid, exit, sleep};
use core::sync::atomic::{AtomicUsize, Ordering};
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use user_lib::{exit, sleep, thread_create, waittid};
const N: usize = 3;
static mut TURN: usize = 0;
@ -26,7 +26,7 @@ fn critical_test_claim() {
}
fn critical_test_exit() {
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
assert_eq!(GUARD.fetch_sub(1, Ordering::SeqCst), 1);
}
fn peterson_enter_critical(id: usize, peer_id: usize) {
@ -75,4 +75,4 @@ pub fn main() -> i32 {
}
println!("main thread exited.");
0
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
// not in SUCC_TESTS & FAIL_TESTS
// count_lines, infloop, user_shell, usertests
// item of TESTS : app_name(argv_0), argv_1, argv_2, argv_3, exit_code
@ -115,7 +115,11 @@ pub fn main() -> i32 {
let succ_num = run_tests(SUCC_TESTS);
let err_num = run_tests(FAIL_TESTS);
if succ_num == SUCC_TESTS.len() as i32 && err_num == FAIL_TESTS.len() as i32 {
println!("{} of sueecssed apps, {} of failed apps run correctly. \nUsertests passed!", SUCC_TESTS.len(), FAIL_TESTS.len() );
println!(
"{} of sueecssed apps, {} of failed apps run correctly. \nUsertests passed!",
SUCC_TESTS.len(),
FAIL_TESTS.len()
);
return 0;
}
if succ_num != SUCC_TESTS.len() as i32 {

@ -118,13 +118,17 @@ pub fn wait(exit_code: &mut i32) -> isize {
}
/// Block (via yield) until the child with `pid` exits, storing its exit
/// code in `exit_code`. Returns the reaped pid, or -1 if no such child.
///
/// The flattened diff left two match arms for the same `exit_pid` binding
/// (the old bare `return` and the new logging block) — the second arm is
/// unreachable and the pair does not parse; only the logging arm is kept.
pub fn waitpid(pid: usize, exit_code: &mut i32) -> isize {
    println!("[USER] lib::waitpid() begin");
    loop {
        match sys_waitpid(pid as isize, exit_code as *mut _) {
            // -2: child exists but has not exited yet; give up the CPU.
            -2 => {
                yield_();
            }
            // -1 or a real pid
            exit_pid => {
                println!("[USER] lib::waitpid() end: exit_pid {}", exit_pid);
                return exit_pid;
            }
        }
    }
}
@ -201,14 +205,14 @@ pub fn condvar_wait(condvar_id: usize, mutex_id: usize) {
/// Volatile store of `$value` through `$var_ref` (cast to a raw pointer).
/// The flattened diff duplicated the matcher line, leaving a malformed
/// macro (two identical matchers, one body); a single rule is kept.
#[macro_export]
macro_rules! vstore {
    ($var_ref: expr, $value: expr) => {
        unsafe { core::intrinsics::volatile_store($var_ref as *const _ as _, $value) }
    };
}
/// Volatile load through `$var_ref` (cast to a raw pointer).
/// The flattened diff duplicated the matcher line, leaving a malformed
/// macro (two identical matchers, one body); a single rule is kept.
#[macro_export]
macro_rules! vload {
    ($var_ref: expr) => {
        unsafe { core::intrinsics::volatile_load($var_ref as *const _ as _) }
    };
}
@ -218,4 +222,4 @@ macro_rules! memory_fence {
() => {
core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst)
};
}
}

Loading…
Cancel
Save