Small Fix && cargo fmt

branch 2022spring
Yifan Wu, 3 years ago
parent 740730e7f7
commit 1c55663478

@ -1,12 +1,9 @@
use easy_fs::{
BlockDevice,
EasyFileSystem,
};
use std::fs::{File, OpenOptions, read_dir};
use std::io::{Read, Write, Seek, SeekFrom};
use std::sync::Mutex;
use clap::{App, Arg};
use easy_fs::{BlockDevice, EasyFileSystem};
use std::fs::{read_dir, File, OpenOptions};
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Arc;
use clap::{Arg, App};
use std::sync::Mutex;
const BLOCK_SZ: usize = 512;
@ -34,17 +31,19 @@ fn main() {
fn easy_fs_pack() -> std::io::Result<()> {
let matches = App::new("EasyFileSystem packer")
.arg(Arg::with_name("source")
.short("s")
.long("source")
.takes_value(true)
.help("Executable source dir(with backslash)")
.arg(
Arg::with_name("source")
.short("s")
.long("source")
.takes_value(true)
.help("Executable source dir(with backslash)"),
)
.arg(Arg::with_name("target")
.short("t")
.long("target")
.takes_value(true)
.help("Executable target dir(with backslash)")
.arg(
Arg::with_name("target")
.short("t")
.long("target")
.takes_value(true)
.help("Executable target dir(with backslash)"),
)
.get_matches();
let src_path = matches.value_of("source").unwrap();
@ -60,11 +59,7 @@ fn easy_fs_pack() -> std::io::Result<()> {
f
})));
// 16MiB, at most 4095 files
let efs = EasyFileSystem::create(
block_file,
16 * 2048,
1,
);
let efs = EasyFileSystem::create(block_file, 16 * 2048, 1);
let root_inode = Arc::new(EasyFileSystem::root_inode(&efs));
let apps: Vec<_> = read_dir(src_path)
.unwrap()
@ -103,11 +98,7 @@ fn efs_test() -> std::io::Result<()> {
f.set_len(8192 * 512).unwrap();
f
})));
EasyFileSystem::create(
block_file.clone(),
4096,
1,
);
EasyFileSystem::create(block_file.clone(), 4096, 1);
let efs = EasyFileSystem::open(block_file.clone());
let root_inode = EasyFileSystem::root_inode(&efs);
root_inode.create("filea");
@ -121,17 +112,11 @@ fn efs_test() -> std::io::Result<()> {
//let mut buffer = [0u8; 512];
let mut buffer = [0u8; 233];
let len = filea.read_at(0, &mut buffer);
assert_eq!(
greet_str,
core::str::from_utf8(&buffer[..len]).unwrap(),
);
assert_eq!(greet_str, core::str::from_utf8(&buffer[..len]).unwrap(),);
let mut random_str_test = |len: usize| {
filea.clear();
assert_eq!(
filea.read_at(0, &mut buffer),
0,
);
assert_eq!(filea.read_at(0, &mut buffer), 0,);
let mut str = String::new();
use rand;
// random digit
@ -148,9 +133,7 @@ fn efs_test() -> std::io::Result<()> {
break;
}
offset += len;
read_str.push_str(
core::str::from_utf8(&read_buffer[..len]).unwrap()
);
read_str.push_str(core::str::from_utf8(&read_buffer[..len]).unwrap());
}
assert_eq!(str, read_str);
};

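Both the packer and efs_test above reach EasyFileSystem through a host-side block device that wraps a std::fs::File behind a Mutex. A minimal sketch of such a wrapper follows; the name BlockFile and the panic-on-short-transfer checks are illustrative assumptions, not necessarily the exact code in this crate.

use easy_fs::BlockDevice;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::Mutex;

const BLOCK_SZ: usize = 512;

// Host-side block device: block i lives at byte offset i * BLOCK_SZ in the backing file.
struct BlockFile(Mutex<File>);

impl BlockDevice for BlockFile {
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
            .expect("seek failed");
        assert_eq!(file.read(buf).unwrap(), BLOCK_SZ, "incomplete block read");
    }
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        let mut file = self.0.lock().unwrap();
        file.seek(SeekFrom::Start((block_id * BLOCK_SZ) as u64))
            .expect("seek failed");
        assert_eq!(file.write(buf).unwrap(), BLOCK_SZ, "incomplete block write");
    }
}
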
@ -1,9 +1,5 @@
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use super::{
BlockDevice,
BLOCK_SZ,
get_block_cache,
};
type BitmapBlock = [u64; 64];
@ -34,14 +30,15 @@ impl Bitmap {
let pos = get_block_cache(
block_id + self.start_block_id as usize,
Arc::clone(block_device),
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
)
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
if let Some((bits64_pos, inner_pos)) = bitmap_block
.iter()
.enumerate()
.find(|(_, bits64)| **bits64 != u64::MAX)
.map(|(bits64_pos, bits64)| {
(bits64_pos, bits64.trailing_ones() as usize)
}) {
.map(|(bits64_pos, bits64)| (bits64_pos, bits64.trailing_ones() as usize))
{
// modify cache
bitmap_block[bits64_pos] |= 1u64 << inner_pos;
Some(block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos as usize)
@ -58,13 +55,12 @@ impl Bitmap {
pub fn dealloc(&self, block_device: &Arc<dyn BlockDevice>, bit: usize) {
let (block_pos, bits64_pos, inner_pos) = decomposition(bit);
get_block_cache(
block_pos + self.start_block_id,
Arc::clone(block_device)
).lock().modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
get_block_cache(block_pos + self.start_block_id, Arc::clone(block_device))
.lock()
.modify(0, |bitmap_block: &mut BitmapBlock| {
assert!(bitmap_block[bits64_pos] & (1u64 << inner_pos) > 0);
bitmap_block[bits64_pos] -= 1u64 << inner_pos;
});
}
pub fn maximum(&self) -> usize {

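Bitmap::alloc above locates a free bit in each 64-bit group with trailing_ones(): a run of k ones starting at bit 0 means bit k is the first free slot. A standalone illustration of that trick:

// trailing_ones() counts the run of 1s starting at bit 0, which is exactly
// the index of the lowest 0 bit whenever the group is not completely full.
fn first_free_bit(bits64: u64) -> Option<usize> {
    if bits64 == u64::MAX {
        None
    } else {
        Some(bits64.trailing_ones() as usize)
    }
}

fn main() {
    assert_eq!(first_free_bit(0b0111), Some(3));
    assert_eq!(first_free_bit(0), Some(0));
    assert_eq!(first_free_bit(u64::MAX), None);
    // Bitmap::alloc then returns block_id * BLOCK_BITS + bits64_pos * 64 + inner_pos.
}
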
@ -1,7 +1,4 @@
use super::{
BLOCK_SZ,
BlockDevice,
};
use super::{BlockDevice, BLOCK_SZ};
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
@ -16,10 +13,7 @@ pub struct BlockCache {
impl BlockCache {
/// Load a new BlockCache from disk.
pub fn new(
block_id: usize,
block_device: Arc<dyn BlockDevice>
) -> Self {
pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
let mut cache = [0u8; BLOCK_SZ];
block_device.read_block(block_id, &mut cache);
Self {
@ -34,14 +28,20 @@ impl BlockCache {
&self.cache[offset] as *const _ as usize
}
pub fn get_ref<T>(&self, offset: usize) -> &T where T: Sized {
pub fn get_ref<T>(&self, offset: usize) -> &T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
let addr = self.addr_of_offset(offset);
unsafe { &*(addr as *const T) }
unsafe { &*(addr as *const T) }
}
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T where T: Sized {
pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
where
T: Sized,
{
let type_size = core::mem::size_of::<T>();
assert!(offset + type_size <= BLOCK_SZ);
self.modified = true;
@ -53,7 +53,7 @@ impl BlockCache {
f(self.get_ref(offset))
}
pub fn modify<T, V>(&mut self, offset:usize, f: impl FnOnce(&mut T) -> V) -> V {
pub fn modify<T, V>(&mut self, offset: usize, f: impl FnOnce(&mut T) -> V) -> V {
f(self.get_mut(offset))
}
@ -79,7 +79,9 @@ pub struct BlockCacheManager {
impl BlockCacheManager {
pub fn new() -> Self {
Self { queue: VecDeque::new() }
Self {
queue: VecDeque::new(),
}
}
pub fn get_block_cache(
@ -87,27 +89,28 @@ impl BlockCacheManager {
block_id: usize,
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
if let Some(pair) = self.queue
.iter()
.find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
if let Some(pair) = self.queue.iter().find(|pair| pair.0 == block_id) {
Arc::clone(&pair.1)
} else {
// substitute
if self.queue.len() == BLOCK_CACHE_SIZE {
// from front to tail
if let Some((idx, _)) = self.queue
if let Some((idx, _)) = self
.queue
.iter()
.enumerate()
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1) {
.find(|(_, pair)| Arc::strong_count(&pair.1) == 1)
{
self.queue.drain(idx..=idx);
} else {
panic!("Run out of BlockCache!");
}
}
// load block into mem and push back
let block_cache = Arc::new(Mutex::new(
BlockCache::new(block_id, Arc::clone(&block_device))
));
let block_cache = Arc::new(Mutex::new(BlockCache::new(
block_id,
Arc::clone(&block_device),
)));
self.queue.push_back((block_id, Arc::clone(&block_cache)));
block_cache
}
@ -115,16 +118,17 @@ impl BlockCacheManager {
}
lazy_static! {
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> = Mutex::new(
BlockCacheManager::new()
);
pub static ref BLOCK_CACHE_MANAGER: Mutex<BlockCacheManager> =
Mutex::new(BlockCacheManager::new());
}
pub fn get_block_cache(
block_id: usize,
block_device: Arc<dyn BlockDevice>
block_device: Arc<dyn BlockDevice>,
) -> Arc<Mutex<BlockCache>> {
BLOCK_CACHE_MANAGER.lock().get_block_cache(block_id, block_device)
BLOCK_CACHE_MANAGER
.lock()
.get_block_cache(block_id, block_device)
}
pub fn block_cache_sync_all() {

@ -1,6 +1,6 @@
use core::any::Any;
pub trait BlockDevice : Send + Sync + Any {
pub trait BlockDevice: Send + Sync + Any {
fn read_block(&self, block_id: usize, buf: &mut [u8]);
fn write_block(&self, block_id: usize, buf: &[u8]);
}

@ -1,16 +1,10 @@
use alloc::sync::Arc;
use spin::Mutex;
use super::{
BlockDevice,
Bitmap,
block_cache_sync_all, get_block_cache, Bitmap, BlockDevice, DiskInode, DiskInodeType, Inode,
SuperBlock,
DiskInode,
DiskInodeType,
Inode,
get_block_cache,
block_cache_sync_all,
};
use crate::BLOCK_SZ;
use alloc::sync::Arc;
use spin::Mutex;
pub struct EasyFileSystem {
pub block_device: Arc<dyn BlockDevice>,
@ -50,39 +44,36 @@ impl EasyFileSystem {
};
// clear all blocks
for i in 0..total_blocks {
get_block_cache(
i as usize,
Arc::clone(&block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() { *byte = 0; }
});
get_block_cache(i as usize, Arc::clone(&block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
for byte in data_block.iter_mut() {
*byte = 0;
}
});
}
// initialize SuperBlock
get_block_cache(0, Arc::clone(&block_device))
.lock()
.modify(0, |super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
});
get_block_cache(0, Arc::clone(&block_device)).lock().modify(
0,
|super_block: &mut SuperBlock| {
super_block.initialize(
total_blocks,
inode_bitmap_blocks,
inode_area_blocks,
data_bitmap_blocks,
data_area_blocks,
);
},
);
// write back immediately
// create a inode for root node "/"
assert_eq!(efs.alloc_inode(), 0);
let (root_inode_block_id, root_inode_offset) = efs.get_disk_inode_pos(0);
get_block_cache(
root_inode_block_id as usize,
Arc::clone(&block_device)
)
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
get_block_cache(root_inode_block_id as usize, Arc::clone(&block_device))
.lock()
.modify(root_inode_offset, |disk_inode: &mut DiskInode| {
disk_inode.initialize(DiskInodeType::Directory);
});
block_cache_sync_all();
Arc::new(Mutex::new(efs))
}
@ -97,10 +88,7 @@ impl EasyFileSystem {
super_block.inode_bitmap_blocks + super_block.inode_area_blocks;
let efs = Self {
block_device,
inode_bitmap: Bitmap::new(
1,
super_block.inode_bitmap_blocks as usize
),
inode_bitmap: Bitmap::new(1, super_block.inode_bitmap_blocks as usize),
data_bitmap: Bitmap::new(
(1 + inode_total_blocks) as usize,
super_block.data_bitmap_blocks as usize,
@ -117,19 +105,17 @@ impl EasyFileSystem {
// acquire efs lock temporarily
let (block_id, block_offset) = efs.lock().get_disk_inode_pos(0);
// release efs lock
Inode::new(
block_id,
block_offset,
Arc::clone(efs),
block_device,
)
Inode::new(block_id, block_offset, Arc::clone(efs), block_device)
}
pub fn get_disk_inode_pos(&self, inode_id: u32) -> (u32, usize) {
let inode_size = core::mem::size_of::<DiskInode>();
let inodes_per_block = (BLOCK_SZ / inode_size) as u32;
let block_id = self.inode_area_start_block + inode_id / inodes_per_block;
(block_id, (inode_id % inodes_per_block) as usize * inode_size)
(
block_id,
(inode_id % inodes_per_block) as usize * inode_size,
)
}
pub fn get_data_block_id(&self, data_block_id: u32) -> u32 {
@ -146,18 +132,16 @@ impl EasyFileSystem {
}
pub fn dealloc_data(&mut self, block_id: u32) {
get_block_cache(
block_id as usize,
Arc::clone(&self.block_device)
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| { *p = 0; })
});
get_block_cache(block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(0, |data_block: &mut DataBlock| {
data_block.iter_mut().for_each(|p| {
*p = 0;
})
});
self.data_bitmap.dealloc(
&self.block_device,
(block_id - self.data_area_start_block) as usize
(block_id - self.data_area_start_block) as usize,
)
}
}
}

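get_disk_inode_pos above converts a flat inode number into a (block id, byte offset) pair. Assuming a DiskInode occupies 128 bytes (28 direct entries plus two indirect pointers, size and type, 4 bytes each), four inodes fit per 512-byte block; a hypothetical standalone version of that arithmetic:

const BLOCK_SZ: usize = 512;

// Standalone sketch of get_disk_inode_pos, with size_of::<DiskInode>() assumed
// to be 128 bytes; inode_area_start_block is a free parameter here.
fn disk_inode_pos(inode_area_start_block: u32, inode_id: u32) -> (u32, usize) {
    let inode_size = 128usize;
    let inodes_per_block = (BLOCK_SZ / inode_size) as u32; // 4 inodes per block
    (
        inode_area_start_block + inode_id / inodes_per_block,
        (inode_id % inodes_per_block) as usize * inode_size,
    )
}

fn main() {
    // Inode 5 sits in the second block of the inode area, 128 bytes in.
    assert_eq!(disk_inode_pos(2, 5), (3, 128));
}
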
@ -1,11 +1,7 @@
use core::fmt::{Debug, Formatter, Result};
use super::{
BLOCK_SZ,
BlockDevice,
get_block_cache,
};
use super::{get_block_cache, BlockDevice, BLOCK_SZ};
use alloc::sync::Arc;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter, Result};
const EFS_MAGIC: u32 = 0x3b800001;
const INODE_DIRECT_COUNT: usize = 28;
@ -115,7 +111,8 @@ impl DiskInode {
if data_blocks > INDIRECT1_BOUND {
total += 1;
// sub indirect1
total += (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
total +=
(data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
}
total as u32
}
@ -135,22 +132,16 @@ impl DiskInode {
})
} else {
let last = inner_id - INDIRECT1_BOUND;
let indirect1 = get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(
indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
let indirect1 = get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect2: &IndirectBlock| {
indirect2[last / INODE_INDIRECT1_COUNT]
});
get_block_cache(indirect1 as usize, Arc::clone(block_device))
.lock()
.read(0, |indirect1: &IndirectBlock| {
indirect1[last % INODE_INDIRECT1_COUNT]
})
}
}
pub fn increase_size(
@ -169,7 +160,7 @@ impl DiskInode {
current_blocks += 1;
}
// alloc indirect1
if total_blocks > INODE_DIRECT_COUNT as u32{
if total_blocks > INODE_DIRECT_COUNT as u32 {
if current_blocks == INODE_DIRECT_COUNT as u32 {
self.indirect1 = new_blocks.next().unwrap();
}
@ -179,17 +170,14 @@ impl DiskInode {
return;
}
// fill indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < total_blocks.min(INODE_INDIRECT1_COUNT as u32) {
indirect1[current_blocks as usize] = new_blocks.next().unwrap();
current_blocks += 1;
}
});
// alloc indirect2
if total_blocks > INODE_INDIRECT1_COUNT as u32 {
if current_blocks == INODE_INDIRECT1_COUNT as u32 {
@ -206,33 +194,27 @@ impl DiskInode {
let a1 = total_blocks as usize / INODE_INDIRECT1_COUNT;
let b1 = total_blocks as usize % INODE_INDIRECT1_COUNT;
// alloc low-level indirect1
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(
indirect2[a0] as usize,
Arc::clone(block_device)
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
while (a0 < a1) || (a0 == a1 && b0 < b1) {
if b0 == 0 {
indirect2[a0] = new_blocks.next().unwrap();
}
// fill current
get_block_cache(indirect2[a0] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
indirect1[b0] = new_blocks.next().unwrap();
});
// move to next
b0 += 1;
if b0 == INODE_INDIRECT1_COUNT {
b0 = 0;
a0 += 1;
}
}
}
});
});
}
/// Clear size to zero and return blocks that should be deallocated.
@ -258,18 +240,15 @@ impl DiskInode {
return v;
}
// indirect1
get_block_cache(
self.indirect1 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
get_block_cache(self.indirect1 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
while current_blocks < data_blocks.min(INODE_INDIRECT1_COUNT) {
v.push(indirect1[current_blocks]);
//indirect1[current_blocks] = 0;
current_blocks += 1;
}
});
self.indirect1 = 0;
// indirect2 block
if data_blocks > INODE_INDIRECT1_COUNT {
@ -282,36 +261,33 @@ impl DiskInode {
assert!(data_blocks <= INODE_INDIRECT2_COUNT);
let a1 = data_blocks / INODE_INDIRECT1_COUNT;
let b1 = data_blocks % INODE_INDIRECT1_COUNT;
get_block_cache(
self.indirect2 as usize,
Arc::clone(block_device),
)
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
//indirect2[a1] = 0;
}
});
get_block_cache(self.indirect2 as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect2: &mut IndirectBlock| {
// full indirect1 blocks
for entry in indirect2.iter_mut().take(a1) {
v.push(*entry);
get_block_cache(*entry as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter() {
v.push(*entry);
}
});
}
// last indirect1 block
if b1 > 0 {
v.push(indirect2[a1]);
get_block_cache(indirect2[a1] as usize, Arc::clone(block_device))
.lock()
.modify(0, |indirect1: &mut IndirectBlock| {
for entry in indirect1.iter().take(b1) {
v.push(*entry);
}
});
//indirect2[a1] = 0;
}
});
self.indirect2 = 0;
v
}
@ -346,7 +322,9 @@ impl DiskInode {
});
read_size += block_read_size;
// move to next block
if end_current_block == end { break; }
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
@ -372,7 +350,7 @@ impl DiskInode {
let block_write_size = end_current_block - start;
get_block_cache(
self.get_block_id(start_block as u32, block_device) as usize,
Arc::clone(block_device)
Arc::clone(block_device),
)
.lock()
.modify(0, |data_block: &mut DataBlock| {
@ -382,7 +360,9 @@ impl DiskInode {
});
write_size += block_write_size;
// move to next block
if end_current_block == end { break; }
if end_current_block == end {
break;
}
start_block += 1;
start = end_current_block;
}
@ -414,20 +394,10 @@ impl DirEntry {
}
}
pub fn as_bytes(&self) -> &[u8] {
unsafe {
core::slice::from_raw_parts(
self as *const _ as usize as *const u8,
DIRENT_SZ,
)
}
unsafe { core::slice::from_raw_parts(self as *const _ as usize as *const u8, DIRENT_SZ) }
}
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
unsafe {
core::slice::from_raw_parts_mut(
self as *mut _ as usize as *mut u8,
DIRENT_SZ,
)
}
unsafe { core::slice::from_raw_parts_mut(self as *mut _ as usize as *mut u8, DIRENT_SZ) }
}
pub fn name(&self) -> &str {
let len = (0usize..).find(|i| self.name[*i] == 0).unwrap();

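DiskInode's block accounting above charges one extra block for the indirect1 table, one for the indirect2 table, and one sub-indirect1 table per batch of data blocks beyond the indirect1 range. With the usual constants for 512-byte blocks of u32 entries (INODE_INDIRECT1_COUNT = 128 and INDIRECT1_BOUND = 28 + 128 = 156; only INODE_DIRECT_COUNT = 28 appears in the diff itself, the other two are assumed), the arithmetic works out as in this sketch:

const INODE_DIRECT_COUNT: usize = 28; // shown in the diff
const INODE_INDIRECT1_COUNT: usize = 128; // assumed: one 512-byte block of u32 entries
const INDIRECT1_BOUND: usize = INODE_DIRECT_COUNT + INODE_INDIRECT1_COUNT; // 156

fn total_blocks(data_blocks: usize) -> usize {
    let mut total = data_blocks;
    if data_blocks > INODE_DIRECT_COUNT {
        total += 1; // the indirect1 table itself
    }
    if data_blocks > INDIRECT1_BOUND {
        total += 1; // the indirect2 table itself
        // one sub-indirect1 table per 128 data blocks beyond the indirect1 range
        total +=
            (data_blocks - INDIRECT1_BOUND + INODE_INDIRECT1_COUNT - 1) / INODE_INDIRECT1_COUNT;
    }
    total
}

fn main() {
    assert_eq!(total_blocks(10), 10); // direct entries only
    assert_eq!(total_blocks(200), 203); // + indirect1 + indirect2 + one sub-indirect1
}
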
@ -2,17 +2,17 @@
extern crate alloc;
mod bitmap;
mod block_cache;
mod block_dev;
mod layout;
mod efs;
mod bitmap;
mod layout;
mod vfs;
mod block_cache;
pub const BLOCK_SZ: usize = 512;
use bitmap::Bitmap;
use block_cache::{block_cache_sync_all, get_block_cache};
pub use block_dev::BlockDevice;
pub use efs::EasyFileSystem;
pub use vfs::Inode;
use layout::*;
use bitmap::Bitmap;
use block_cache::{get_block_cache, block_cache_sync_all};
pub use vfs::Inode;

@ -1,15 +1,9 @@
use super::{
BlockDevice,
DiskInode,
DiskInodeType,
DirEntry,
EasyFileSystem,
DIRENT_SZ,
get_block_cache,
block_cache_sync_all,
block_cache_sync_all, get_block_cache, BlockDevice, DirEntry, DiskInode, DiskInodeType,
EasyFileSystem, DIRENT_SZ,
};
use alloc::sync::Arc;
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use spin::{Mutex, MutexGuard};
@ -37,35 +31,25 @@ impl Inode {
}
fn read_disk_inode<V>(&self, f: impl FnOnce(&DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().read(self.block_offset, f)
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.read(self.block_offset, f)
}
fn modify_disk_inode<V>(&self, f: impl FnOnce(&mut DiskInode) -> V) -> V {
get_block_cache(
self.block_id,
Arc::clone(&self.block_device)
).lock().modify(self.block_offset, f)
get_block_cache(self.block_id, Arc::clone(&self.block_device))
.lock()
.modify(self.block_offset, f)
}
fn find_inode_id(
&self,
name: &str,
disk_inode: &DiskInode,
) -> Option<u32> {
fn find_inode_id(&self, name: &str, disk_inode: &DiskInode) -> Option<u32> {
// assert it is a directory
assert!(disk_inode.is_dir());
let file_count = (disk_inode.size as usize) / DIRENT_SZ;
let mut dirent = DirEntry::empty();
for i in 0..file_count {
assert_eq!(
disk_inode.read_at(
DIRENT_SZ * i,
dirent.as_bytes_mut(),
&self.block_device,
),
disk_inode.read_at(DIRENT_SZ * i, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
if dirent.name() == name {
@ -78,8 +62,7 @@ impl Inode {
pub fn find(&self, name: &str) -> Option<Arc<Inode>> {
let fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
self.find_inode_id(name, disk_inode)
.map(|inode_id| {
self.find_inode_id(name, disk_inode).map(|inode_id| {
let (block_id, block_offset) = fs.get_disk_inode_pos(inode_id);
Arc::new(Self::new(
block_id,
@ -123,14 +106,12 @@ impl Inode {
// alloc a inode with an indirect block
let new_inode_id = fs.alloc_inode();
// initialize inode
let (new_inode_block_id, new_inode_block_offset)
= fs.get_disk_inode_pos(new_inode_id);
get_block_cache(
new_inode_block_id as usize,
Arc::clone(&self.block_device)
).lock().modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
let (new_inode_block_id, new_inode_block_offset) = fs.get_disk_inode_pos(new_inode_id);
get_block_cache(new_inode_block_id as usize, Arc::clone(&self.block_device))
.lock()
.modify(new_inode_block_offset, |new_inode: &mut DiskInode| {
new_inode.initialize(DiskInodeType::File);
});
self.modify_disk_inode(|root_inode| {
// append file in the dirent
let file_count = (root_inode.size as usize) / DIRENT_SZ;
@ -166,11 +147,7 @@ impl Inode {
for i in 0..file_count {
let mut dirent = DirEntry::empty();
assert_eq!(
disk_inode.read_at(
i * DIRENT_SZ,
dirent.as_bytes_mut(),
&self.block_device,
),
disk_inode.read_at(i * DIRENT_SZ, dirent.as_bytes_mut(), &self.block_device,),
DIRENT_SZ,
);
v.push(String::from(dirent.name()));
@ -181,9 +158,7 @@ impl Inode {
pub fn read_at(&self, offset: usize, buf: &mut [u8]) -> usize {
let _fs = self.fs.lock();
self.read_disk_inode(|disk_inode| {
disk_inode.read_at(offset, buf, &self.block_device)
})
self.read_disk_inode(|disk_inode| disk_inode.read_at(offset, buf, &self.block_device))
}
pub fn write_at(&self, offset: usize, buf: &[u8]) -> usize {

@ -17,26 +17,24 @@ pub const CLOCK_FREQ: usize = 403000000 / 62;
pub const CLOCK_FREQ: usize = 12500000;
#[cfg(feature = "board_qemu")]
pub const MMIO: &[(usize, usize)] = &[
(0x10001000, 0x1000),
];
pub const MMIO: &[(usize, usize)] = &[(0x10001000, 0x1000)];
#[cfg(feature = "board_k210")]
pub const MMIO: &[(usize, usize)] = &[
// we don't need clint in S priv when running
// we only need claim/complete for target0 after initializing
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
(0x0C00_0000, 0x3000), /* PLIC */
(0x0C20_0000, 0x1000), /* PLIC */
(0x3800_0000, 0x1000), /* UARTHS */
(0x3800_1000, 0x1000), /* GPIOHS */
(0x5020_0000, 0x1000), /* GPIO */
(0x5024_0000, 0x1000), /* SPI_SLAVE */
(0x502B_0000, 0x1000), /* FPIOA */
(0x502D_0000, 0x1000), /* TIMER0 */
(0x502E_0000, 0x1000), /* TIMER1 */
(0x502F_0000, 0x1000), /* TIMER2 */
(0x5044_0000, 0x1000), /* SYSCTL */
(0x5200_0000, 0x1000), /* SPI0 */
(0x5300_0000, 0x1000), /* SPI1 */
(0x5400_0000, 0x1000), /* SPI2 */
];

@ -1,5 +1,5 @@
use core::fmt::{self, Write};
use crate::sbi::console_putchar;
use core::fmt::{self, Write};
struct Stdout;
@ -29,5 +29,3 @@ macro_rules! println {
$crate::console::print(format_args!(concat!($fmt, "\n") $(, $($arg)+)?))
}
}

@ -1,9 +1,9 @@
mod virtio_blk;
mod sdcard;
mod virtio_blk;
use lazy_static::*;
use alloc::sync::Arc;
use easy_fs::BlockDevice;
use lazy_static::*;
#[cfg(feature = "board_qemu")]
type BlockDeviceImpl = virtio_blk::VirtIOBlock;
@ -21,10 +21,12 @@ pub fn block_device_test() {
let mut write_buffer = [0u8; 512];
let mut read_buffer = [0u8; 512];
for i in 0..512 {
for byte in write_buffer.iter_mut() { *byte = i as u8; }
for byte in write_buffer.iter_mut() {
*byte = i as u8;
}
block_device.write_block(i as usize, &write_buffer);
block_device.read_block(i as usize, &mut read_buffer);
assert_eq!(write_buffer, read_buffer);
}
println!("block device test passed!");
}
}

@ -2,21 +2,21 @@
#![allow(non_camel_case_types)]
#![allow(unused)]
use k210_pac::{Peripherals, SPI0};
use super::BlockDevice;
use crate::sync::UPSafeCell;
use core::convert::TryInto;
use k210_hal::prelude::*;
use k210_pac::{Peripherals, SPI0};
use k210_soc::{
fpioa::{self, io},
//dmac::{dma_channel, DMAC, DMACExt},
gpio,
gpiohs,
spi::{aitm, frame_format, tmod, work_mode, SPI, SPIExt, SPIImpl},
fpioa::{self, io},
sysctl,
sleep::usleep,
spi::{aitm, frame_format, tmod, work_mode, SPIExt, SPIImpl, SPI},
sysctl,
};
use crate::sync::UPSafeCell;
use lazy_static::*;
use super::BlockDevice;
use core::convert::TryInto;
pub struct SDCard<SPI> {
spi: SPI,
@ -160,7 +160,11 @@ pub struct SDCardInfo {
}
impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
pub fn new(spi: X, spi_cs: u32, cs_gpionum: u8/*, dmac: &'a DMAC, channel: dma_channel*/) -> Self {
pub fn new(
spi: X,
spi_cs: u32,
cs_gpionum: u8, /*, dmac: &'a DMAC, channel: dma_channel*/
) -> Self {
Self {
spi,
spi_cs,
@ -606,7 +610,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
}
let mut error = false;
//let mut dma_chunk = [0u32; SEC_LEN];
let mut tmp_chunk= [0u8; SEC_LEN];
let mut tmp_chunk = [0u8; SEC_LEN];
for chunk in data_buf.chunks_mut(SEC_LEN) {
if self.get_response() != SD_START_DATA_SINGLE_BLOCK_READ {
error = true;
@ -616,7 +620,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
//self.read_data_dma(&mut dma_chunk);
self.read_data(&mut tmp_chunk);
/* Place the data received as u32 units from DMA into the u8 target buffer */
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/tmp_chunk.iter()) {
for (a, b) in chunk.iter_mut().zip(/*dma_chunk*/ tmp_chunk.iter()) {
//*a = (b & 0xff) as u8;
*a = *b;
}
@ -675,7 +679,7 @@ impl</*'a,*/ X: SPI> SDCard</*'a,*/ X> {
/* Send the data token to signify the start of the data */
self.write_data(&frame);
/* Write the block data to SD : write count data by block */
for (a, &b) in /*dma_chunk*/tmp_chunk.iter_mut().zip(chunk.iter()) {
for (a, &b) in /*dma_chunk*/ tmp_chunk.iter_mut().zip(chunk.iter()) {
//*a = b.into();
*a = b;
}
@ -711,9 +715,8 @@ fn io_init() {
}
lazy_static! {
static ref PERIPHERALS: UPSafeCell<Peripherals> = unsafe {
UPSafeCell::new(Peripherals::take().unwrap())
};
static ref PERIPHERALS: UPSafeCell<Peripherals> =
unsafe { UPSafeCell::new(Peripherals::take().unwrap()) };
}
fn init_sdcard() -> SDCard<SPIImpl<SPI0>> {
@ -747,9 +750,15 @@ impl SDCardWrapper {
impl BlockDevice for SDCardWrapper {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access().read_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.read_sector(buf, block_id as u32)
.unwrap();
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access().write_sector(buf,block_id as u32).unwrap();
self.0
.exclusive_access()
.write_sector(buf, block_id as u32)
.unwrap();
}
}
}

@ -1,20 +1,12 @@
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
use super::BlockDevice;
use crate::mm::{
PhysAddr,
VirtAddr,
frame_alloc,
frame_dealloc,
PhysPageNum,
FrameTracker,
StepByOne,
PageTable,
kernel_token,
frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum,
StepByOne, VirtAddr,
};
use super::BlockDevice;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use lazy_static::*;
use virtio_drivers::{VirtIOBlk, VirtIOHeader};
#[allow(unused)]
const VIRTIO0: usize = 0x10001000;
@ -22,21 +14,21 @@ const VIRTIO0: usize = 0x10001000;
pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static>>);
lazy_static! {
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe {
UPSafeCell::new(Vec::new())
};
static ref QUEUE_FRAMES: UPSafeCell<Vec<FrameTracker>> = unsafe { UPSafeCell::new(Vec::new()) };
}
impl BlockDevice for VirtIOBlock {
fn read_block(&self, block_id: usize, buf: &mut [u8]) {
self.0.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
self.0
.exclusive_access()
.read_block(block_id, buf)
.expect("Error when reading VirtIOBlk");
}
fn write_block(&self, block_id: usize, buf: &[u8]) {
self.0.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
self.0
.exclusive_access()
.write_block(block_id, buf)
.expect("Error when writing VirtIOBlk");
}
}
@ -44,9 +36,9 @@ impl VirtIOBlock {
#[allow(unused)]
pub fn new() -> Self {
unsafe {
Self(UPSafeCell::new(VirtIOBlk::new(
&mut *(VIRTIO0 as *mut VirtIOHeader)
).unwrap()))
Self(UPSafeCell::new(
VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(),
))
}
}
}
@ -56,7 +48,9 @@ pub extern "C" fn virtio_dma_alloc(pages: usize) -> PhysAddr {
let mut ppn_base = PhysPageNum(0);
for i in 0..pages {
let frame = frame_alloc().unwrap();
if i == 0 { ppn_base = frame.ppn; }
if i == 0 {
ppn_base = frame.ppn;
}
assert_eq!(frame.ppn.0, ppn_base.0 + i);
QUEUE_FRAMES.exclusive_access().push(frame);
}
@ -80,5 +74,7 @@ pub extern "C" fn virtio_phys_to_virt(paddr: PhysAddr) -> VirtAddr {
#[no_mangle]
pub extern "C" fn virtio_virt_to_phys(vaddr: VirtAddr) -> PhysAddr {
PageTable::from_token(kernel_token()).translate_va(vaddr).unwrap()
PageTable::from_token(kernel_token())
.translate_va(vaddr)
.unwrap()
}

@ -1,3 +1,3 @@
mod block;
pub use block::BLOCK_DEVICE;
pub use block::BLOCK_DEVICE;

@ -1,15 +1,12 @@
use easy_fs::{
EasyFileSystem,
Inode,
};
use super::File;
use crate::drivers::BLOCK_DEVICE;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::Arc;
use lazy_static::*;
use bitflags::*;
use alloc::vec::Vec;
use super::File;
use crate::mm::UserBuffer;
use bitflags::*;
use easy_fs::{EasyFileSystem, Inode};
use lazy_static::*;
pub struct OSInode {
readable: bool,
@ -23,18 +20,11 @@ pub struct OSInodeInner {
}
impl OSInode {
pub fn new(
readable: bool,
writable: bool,
inode: Arc<Inode>,
) -> Self {
pub fn new(readable: bool, writable: bool, inode: Arc<Inode>) -> Self {
Self {
readable,
writable,
inner: unsafe { UPSafeCell::new(OSInodeInner {
offset: 0,
inode,
})},
inner: unsafe { UPSafeCell::new(OSInodeInner { offset: 0, inode }) },
}
}
pub fn read_all(&self) -> Vec<u8> {
@ -98,40 +88,30 @@ pub fn open_file(name: &str, flags: OpenFlags) -> Option<Arc<OSInode>> {
if let Some(inode) = ROOT_INODE.find(name) {
// clear size
inode.clear();
Some(Arc::new(OSInode::new(
readable,
writable,
inode,
)))
Some(Arc::new(OSInode::new(readable, writable, inode)))
} else {
// create file
ROOT_INODE.create(name)
.map(|inode| {
Arc::new(OSInode::new(
readable,
writable,
inode,
))
})
ROOT_INODE
.create(name)
.map(|inode| Arc::new(OSInode::new(readable, writable, inode)))
}
} else {
ROOT_INODE.find(name)
.map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(
readable,
writable,
inode
))
})
ROOT_INODE.find(name).map(|inode| {
if flags.contains(OpenFlags::TRUNC) {
inode.clear();
}
Arc::new(OSInode::new(readable, writable, inode))
})
}
}
impl File for OSInode {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, mut buf: UserBuffer) -> usize {
let mut inner = self.inner.exclusive_access();
let mut total_read_size = 0usize;
@ -156,4 +136,4 @@ impl File for OSInode {
}
total_write_size
}
}
}

@ -1,16 +1,16 @@
mod inode;
mod pipe;
mod stdio;
mod inode;
use crate::mm::UserBuffer;
pub trait File : Send + Sync {
pub trait File: Send + Sync {
fn readable(&self) -> bool;
fn writable(&self) -> bool;
fn read(&self, buf: UserBuffer) -> usize;
fn write(&self, buf: UserBuffer) -> usize;
}
pub use pipe::{Pipe, make_pipe};
pub use inode::{list_apps, open_file, OSInode, OpenFlags};
pub use pipe::{make_pipe, Pipe};
pub use stdio::{Stdin, Stdout};
pub use inode::{OSInode, open_file, OpenFlags, list_apps};

@ -1,7 +1,7 @@
use super::File;
use alloc::sync::{Arc, Weak};
use crate::sync::UPSafeCell;
use crate::mm::UserBuffer;
use crate::sync::UPSafeCell;
use alloc::sync::{Arc, Weak};
use crate::task::suspend_current_and_run_next;
@ -98,22 +98,20 @@ impl PipeRingBuffer {
/// Return (read_end, write_end)
pub fn make_pipe() -> (Arc<Pipe>, Arc<Pipe>) {
let buffer = Arc::new(unsafe {
UPSafeCell::new(PipeRingBuffer::new())
});
let read_end = Arc::new(
Pipe::read_end_with_buffer(buffer.clone())
);
let write_end = Arc::new(
Pipe::write_end_with_buffer(buffer.clone())
);
let buffer = Arc::new(unsafe { UPSafeCell::new(PipeRingBuffer::new()) });
let read_end = Arc::new(Pipe::read_end_with_buffer(buffer.clone()));
let write_end = Arc::new(Pipe::write_end_with_buffer(buffer.clone()));
buffer.exclusive_access().set_write_end(&write_end);
(read_end, write_end)
}
impl File for Pipe {
fn readable(&self) -> bool { self.readable }
fn writable(&self) -> bool { self.writable }
fn readable(&self) -> bool {
self.readable
}
fn writable(&self) -> bool {
self.writable
}
fn read(&self, buf: UserBuffer) -> usize {
assert_eq!(self.readable(), true);
let mut buf_iter = buf.into_iter();
@ -132,7 +130,9 @@ impl File for Pipe {
// read at most loop_read bytes
for _ in 0..loop_read {
if let Some(byte_ref) = buf_iter.next() {
unsafe { *byte_ref = ring_buffer.read_byte(); }
unsafe {
*byte_ref = ring_buffer.read_byte();
}
read_size += 1;
} else {
return read_size;

@ -1,5 +1,5 @@
use super::File;
use crate::mm::{UserBuffer};
use crate::mm::UserBuffer;
use crate::sbi::console_getchar;
use crate::task::suspend_current_and_run_next;
@ -8,8 +8,12 @@ pub struct Stdin;
pub struct Stdout;
impl File for Stdin {
fn readable(&self) -> bool { true }
fn writable(&self) -> bool { false }
fn readable(&self) -> bool {
true
}
fn writable(&self) -> bool {
false
}
fn read(&self, mut user_buf: UserBuffer) -> usize {
assert_eq!(user_buf.len(), 1);
// busy loop
@ -24,7 +28,9 @@ impl File for Stdin {
}
}
let ch = c as u8;
unsafe { user_buf.buffers[0].as_mut_ptr().write_volatile(ch); }
unsafe {
user_buf.buffers[0].as_mut_ptr().write_volatile(ch);
}
1
}
fn write(&self, _user_buf: UserBuffer) -> usize {
@ -33,9 +39,13 @@ impl File for Stdin {
}
impl File for Stdout {
fn readable(&self) -> bool { false }
fn writable(&self) -> bool { true }
fn read(&self, _user_buf: UserBuffer) -> usize{
fn readable(&self) -> bool {
false
}
fn writable(&self) -> bool {
true
}
fn read(&self, _user_buf: UserBuffer) -> usize {
panic!("Cannot read from stdout!");
}
fn write(&self, user_buf: UserBuffer) -> usize {
@ -44,4 +54,4 @@ impl File for Stdout {
}
user_buf.len()
}
}
}

@ -1,16 +1,23 @@
use core::panic::PanicInfo;
use core::arch::asm;
use crate::sbi::shutdown;
use crate::task::current_kstack_top;
use core::arch::asm;
use core::panic::PanicInfo;
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
if let Some(location) = info.location() {
println!("[kernel] Panicked at {}:{} {}", location.file(), location.line(), info.message().unwrap());
println!(
"[kernel] Panicked at {}:{} {}",
location.file(),
location.line(),
info.message().unwrap()
);
} else {
println!("[kernel] Panicked: {}", info.message().unwrap());
}
unsafe { backtrace(); }
unsafe {
backtrace();
}
shutdown()
}
@ -20,9 +27,11 @@ unsafe fn backtrace() {
asm!("mv {}, s0", out(reg) fp);
println!("---START BACKTRACE---");
for i in 0..10 {
if fp == stop { break; }
println!("#{}:ra={:#x}", i, *((fp-8) as *const usize));
fp = *((fp-16) as *const usize);
if fp == stop {
break;
}
println!("#{}:ra={:#x}", i, *((fp - 8) as *const usize));
fp = *((fp - 16) as *const usize);
}
println!("---END BACKTRACE---");
}

@ -10,17 +10,17 @@ extern crate bitflags;
#[macro_use]
mod console;
mod config;
mod drivers;
mod fs;
mod lang_items;
mod mm;
mod sbi;
mod sync;
mod syscall;
mod trap;
mod config;
mod task;
mod timer;
mod sync;
mod mm;
mod fs;
mod drivers;
mod trap;
use core::arch::global_asm;
@ -32,10 +32,8 @@ fn clear_bss() {
fn ebss();
}
unsafe {
core::slice::from_raw_parts_mut(
sbss as usize as *mut u8,
ebss as usize - sbss as usize,
).fill(0);
core::slice::from_raw_parts_mut(sbss as usize as *mut u8, ebss as usize - sbss as usize)
.fill(0);
}
}

@ -1,5 +1,5 @@
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use super::PageTableEntry;
use crate::config::{PAGE_SIZE, PAGE_SIZE_BITS};
use core::fmt::{self, Debug, Formatter};
const PA_WIDTH_SV39: usize = 56;
@ -52,35 +52,59 @@ impl Debug for PhysPageNum {
/// usize -> T: usize.into()
impl From<usize> for PhysAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << PA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PA_WIDTH_SV39) - 1))
}
}
impl From<usize> for PhysPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << PPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << PPN_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtAddr {
fn from(v: usize) -> Self { Self(v & ( (1 << VA_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VA_WIDTH_SV39) - 1))
}
}
impl From<usize> for VirtPageNum {
fn from(v: usize) -> Self { Self(v & ( (1 << VPN_WIDTH_SV39) - 1 )) }
fn from(v: usize) -> Self {
Self(v & ((1 << VPN_WIDTH_SV39) - 1))
}
}
impl From<PhysAddr> for usize {
fn from(v: PhysAddr) -> Self { v.0 }
fn from(v: PhysAddr) -> Self {
v.0
}
}
impl From<PhysPageNum> for usize {
fn from(v: PhysPageNum) -> Self { v.0 }
fn from(v: PhysPageNum) -> Self {
v.0
}
}
impl From<VirtAddr> for usize {
fn from(v: VirtAddr) -> Self { v.0 }
fn from(v: VirtAddr) -> Self {
v.0
}
}
impl From<VirtPageNum> for usize {
fn from(v: VirtPageNum) -> Self { v.0 }
fn from(v: VirtPageNum) -> Self {
v.0
}
}
impl VirtAddr {
pub fn floor(&self) -> VirtPageNum { VirtPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> VirtPageNum { VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> VirtPageNum {
VirtPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> VirtPageNum {
VirtPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<VirtAddr> for VirtPageNum {
fn from(v: VirtAddr) -> Self {
@ -89,13 +113,23 @@ impl From<VirtAddr> for VirtPageNum {
}
}
impl From<VirtPageNum> for VirtAddr {
fn from(v: VirtPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: VirtPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl PhysAddr {
pub fn floor(&self) -> PhysPageNum { PhysPageNum(self.0 / PAGE_SIZE) }
pub fn ceil(&self) -> PhysPageNum { PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE) }
pub fn page_offset(&self) -> usize { self.0 & (PAGE_SIZE - 1) }
pub fn aligned(&self) -> bool { self.page_offset() == 0 }
pub fn floor(&self) -> PhysPageNum {
PhysPageNum(self.0 / PAGE_SIZE)
}
pub fn ceil(&self) -> PhysPageNum {
PhysPageNum((self.0 - 1 + PAGE_SIZE) / PAGE_SIZE)
}
pub fn page_offset(&self) -> usize {
self.0 & (PAGE_SIZE - 1)
}
pub fn aligned(&self) -> bool {
self.page_offset() == 0
}
}
impl From<PhysAddr> for PhysPageNum {
fn from(v: PhysAddr) -> Self {
@ -104,7 +138,9 @@ impl From<PhysAddr> for PhysPageNum {
}
}
impl From<PhysPageNum> for PhysAddr {
fn from(v: PhysPageNum) -> Self { Self(v.0 << PAGE_SIZE_BITS) }
fn from(v: PhysPageNum) -> Self {
Self(v.0 << PAGE_SIZE_BITS)
}
}
impl VirtPageNum {
@ -121,28 +157,20 @@ impl VirtPageNum {
impl PhysAddr {
pub fn get_ref<T>(&self) -> &'static T {
unsafe {
(self.0 as *const T).as_ref().unwrap()
}
unsafe { (self.0 as *const T).as_ref().unwrap() }
}
pub fn get_mut<T>(&self) -> &'static mut T {
unsafe {
(self.0 as *mut T).as_mut().unwrap()
}
unsafe { (self.0 as *mut T).as_mut().unwrap() }
}
}
impl PhysPageNum {
pub fn get_pte_array(&self) -> &'static mut [PageTableEntry] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512)
}
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut PageTableEntry, 512) }
}
pub fn get_bytes_array(&self) -> &'static mut [u8] {
let pa: PhysAddr = self.clone().into();
unsafe {
core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096)
}
unsafe { core::slice::from_raw_parts_mut(pa.0 as *mut u8, 4096) }
}
pub fn get_mut<T>(&self) -> &'static mut T {
let pa: PhysAddr = self.clone().into();
@ -165,41 +193,57 @@ impl StepByOne for PhysPageNum {
}
#[derive(Copy, Clone)]
pub struct SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
l: T,
r: T,
}
impl<T> SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(start: T, end: T) -> Self {
assert!(start <= end, "start {:?} > end {:?}!", start, end);
Self { l: start, r: end }
}
pub fn get_start(&self) -> T { self.l }
pub fn get_end(&self) -> T { self.r }
pub fn get_start(&self) -> T {
self.l
}
pub fn get_end(&self) -> T {
self.r
}
}
impl<T> IntoIterator for SimpleRange<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> IntoIterator for SimpleRange<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
type IntoIter = SimpleRangeIterator<T>;
fn into_iter(self) -> Self::IntoIter {
SimpleRangeIterator::new(self.l, self.r)
}
}
pub struct SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
pub struct SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
current: T,
end: T,
}
impl<T> SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
pub fn new(l: T, r: T) -> Self {
Self { current: l, end: r, }
Self { current: l, end: r }
}
}
impl<T> Iterator for SimpleRangeIterator<T> where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug, {
impl<T> Iterator for SimpleRangeIterator<T>
where
T: StepByOne + Copy + PartialEq + PartialOrd + Debug,
{
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
if self.current == self.end {

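The floor, ceil and page_offset helpers above are plain page arithmetic over PAGE_SIZE (assumed to be 4096, as usual for this kernel); a quick standalone check:

const PAGE_SIZE: usize = 4096; // assumed, as in config::PAGE_SIZE

fn floor(addr: usize) -> usize {
    addr / PAGE_SIZE
}
fn ceil(addr: usize) -> usize {
    (addr - 1 + PAGE_SIZE) / PAGE_SIZE
}
fn page_offset(addr: usize) -> usize {
    addr & (PAGE_SIZE - 1)
}

fn main() {
    let addr = 0x8020_1234usize;
    assert_eq!(floor(addr), 0x80201); // page containing addr
    assert_eq!(ceil(addr), 0x80202); // first page boundary at or above addr
    assert_eq!(page_offset(addr), 0x234); // offset within the page
    assert_eq!(ceil(0x8020_0000), 0x80200); // already aligned: ceil equals floor
}
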
@ -1,9 +1,9 @@
use super::{PhysAddr, PhysPageNum};
use alloc::vec::Vec;
use crate::sync::UPSafeCell;
use crate::config::MEMORY_END;
use lazy_static::*;
use crate::sync::UPSafeCell;
use alloc::vec::Vec;
use core::fmt::{self, Debug, Formatter};
use lazy_static::*;
pub struct FrameTracker {
pub ppn: PhysPageNum,
@ -72,10 +72,7 @@ impl FrameAllocator for StackFrameAllocator {
fn dealloc(&mut self, ppn: PhysPageNum) {
let ppn = ppn.0;
// validity check
if ppn >= self.current || self.recycled
.iter()
.find(|&v| {*v == ppn})
.is_some() {
if ppn >= self.current || self.recycled.iter().find(|&v| *v == ppn).is_some() {
panic!("Frame ppn={:#x} has not been allocated!", ppn);
}
// recycle
@ -86,18 +83,18 @@ impl FrameAllocator for StackFrameAllocator {
type FrameAllocatorImpl = StackFrameAllocator;
lazy_static! {
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> = unsafe {
UPSafeCell::new(FrameAllocatorImpl::new())
};
pub static ref FRAME_ALLOCATOR: UPSafeCell<FrameAllocatorImpl> =
unsafe { UPSafeCell::new(FrameAllocatorImpl::new()) };
}
pub fn init_frame_allocator() {
extern "C" {
fn ekernel();
}
FRAME_ALLOCATOR
.exclusive_access()
.init(PhysAddr::from(ekernel as usize).ceil(), PhysAddr::from(MEMORY_END).floor());
FRAME_ALLOCATOR.exclusive_access().init(
PhysAddr::from(ekernel as usize).ceil(),
PhysAddr::from(MEMORY_END).floor(),
);
}
pub fn frame_alloc() -> Option<FrameTracker> {
@ -108,9 +105,7 @@ pub fn frame_alloc() -> Option<FrameTracker> {
}
pub fn frame_dealloc(ppn: PhysPageNum) {
FRAME_ALLOCATOR
.exclusive_access()
.dealloc(ppn);
FRAME_ALLOCATOR.exclusive_access().dealloc(ppn);
}
#[allow(unused)]

@ -1,5 +1,5 @@
use buddy_system_allocator::LockedHeap;
use crate::config::KERNEL_HEAP_SIZE;
use buddy_system_allocator::LockedHeap;
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();

@ -1,20 +1,15 @@
use super::{PageTable, PageTableEntry, PTEFlags};
use super::{VirtPageNum, VirtAddr, PhysPageNum, PhysAddr};
use super::{FrameTracker, frame_alloc};
use super::{VPNRange, StepByOne};
use super::{frame_alloc, FrameTracker};
use super::{PTEFlags, PageTable, PageTableEntry};
use super::{PhysAddr, PhysPageNum, VirtAddr, VirtPageNum};
use super::{StepByOne, VPNRange};
use crate::config::{MEMORY_END, MMIO, PAGE_SIZE, TRAMPOLINE};
use crate::sync::UPSafeCell;
use alloc::collections::BTreeMap;
use alloc::vec::Vec;
use riscv::register::satp;
use alloc::sync::Arc;
use lazy_static::*;
use crate::sync::UPSafeCell;
use crate::config::{
MEMORY_END,
PAGE_SIZE,
TRAMPOLINE,
MMIO,
};
use alloc::vec::Vec;
use core::arch::asm;
use lazy_static::*;
use riscv::register::satp;
extern "C" {
fn stext();
@ -30,9 +25,8 @@ extern "C" {
}
lazy_static! {
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> = Arc::new(unsafe {
UPSafeCell::new(MemorySet::new_kernel())
});
pub static ref KERNEL_SPACE: Arc<UPSafeCell<MemorySet>> =
Arc::new(unsafe { UPSafeCell::new(MemorySet::new_kernel()) });
}
pub fn kernel_token() -> usize {
@ -55,17 +49,24 @@ impl MemorySet {
self.page_table.token()
}
/// Assume that no conflicts.
pub fn insert_framed_area(&mut self, start_va: VirtAddr, end_va: VirtAddr, permission: MapPermission) {
self.push(MapArea::new(
start_va,
end_va,
MapType::Framed,
permission,
), None);
pub fn insert_framed_area(
&mut self,
start_va: VirtAddr,
end_va: VirtAddr,
permission: MapPermission,
) {
self.push(
MapArea::new(start_va, end_va, MapType::Framed, permission),
None,
);
}
pub fn remove_area_with_start_vpn(&mut self, start_vpn: VirtPageNum) {
if let Some((idx, area)) = self.areas.iter_mut().enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn) {
if let Some((idx, area)) = self
.areas
.iter_mut()
.enumerate()
.find(|(_, area)| area.vpn_range.get_start() == start_vpn)
{
area.unmap(&mut self.page_table);
self.areas.remove(idx);
}
@ -94,50 +95,71 @@ impl MemorySet {
println!(".text [{:#x}, {:#x})", stext as usize, etext as usize);
println!(".rodata [{:#x}, {:#x})", srodata as usize, erodata as usize);
println!(".data [{:#x}, {:#x})", sdata as usize, edata as usize);
println!(".bss [{:#x}, {:#x})", sbss_with_stack as usize, ebss as usize);
println!(
".bss [{:#x}, {:#x})",
sbss_with_stack as usize, ebss as usize
);
println!("mapping .text section");
memory_set.push(MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
), None);
memory_set.push(
MapArea::new(
(stext as usize).into(),
(etext as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::X,
),
None,
);
println!("mapping .rodata section");
memory_set.push(MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
), None);
memory_set.push(
MapArea::new(
(srodata as usize).into(),
(erodata as usize).into(),
MapType::Identical,
MapPermission::R,
),
None,
);
println!("mapping .data section");
memory_set.push(MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
memory_set.push(
MapArea::new(
(sdata as usize).into(),
(edata as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping .bss section");
memory_set.push(MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
memory_set.push(
MapArea::new(
(sbss_with_stack as usize).into(),
(ebss as usize).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
println!("mapping physical memory");
memory_set.push(MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
memory_set.push(
MapArea::new(
(ekernel as usize).into(),
MEMORY_END.into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
), None);
),
None,
);
println!("mapping memory-mapped registers");
for pair in MMIO {
memory_set.push(
MapArea::new(
(*pair).0.into(),
((*pair).0 + (*pair).1).into(),
MapType::Identical,
MapPermission::R | MapPermission::W,
),
None,
);
}
memory_set
}
@ -161,26 +183,31 @@ impl MemorySet {
let end_va: VirtAddr = ((ph.virtual_addr() + ph.mem_size()) as usize).into();
let mut map_perm = MapPermission::U;
let ph_flags = ph.flags();
if ph_flags.is_read() { map_perm |= MapPermission::R; }
if ph_flags.is_write() { map_perm |= MapPermission::W; }
if ph_flags.is_execute() { map_perm |= MapPermission::X; }
let map_area = MapArea::new(
start_va,
end_va,
MapType::Framed,
map_perm,
);
if ph_flags.is_read() {
map_perm |= MapPermission::R;
}
if ph_flags.is_write() {
map_perm |= MapPermission::W;
}
if ph_flags.is_execute() {
map_perm |= MapPermission::X;
}
let map_area = MapArea::new(start_va, end_va, MapType::Framed, map_perm);
max_end_vpn = map_area.vpn_range.get_end();
memory_set.push(
map_area,
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize])
Some(&elf.input[ph.offset() as usize..(ph.offset() + ph.file_size()) as usize]),
);
}
}
let max_end_va: VirtAddr = max_end_vpn.into();
let mut user_stack_base: usize = max_end_va.into();
user_stack_base += PAGE_SIZE;
(memory_set, user_stack_base, elf.header.pt2.entry_point() as usize)
(
memory_set,
user_stack_base,
elf.header.pt2.entry_point() as usize,
)
}
pub fn from_existed_user(user_space: &MemorySet) -> MemorySet {
let mut memory_set = Self::new_bare();
@ -194,7 +221,9 @@ impl MemorySet {
for vpn in area.vpn_range {
let src_ppn = user_space.translate(vpn).unwrap().ppn();
let dst_ppn = memory_set.translate(vpn).unwrap().ppn();
dst_ppn.get_bytes_array().copy_from_slice(src_ppn.get_bytes_array());
dst_ppn
.get_bytes_array()
.copy_from_slice(src_ppn.get_bytes_array());
}
}
memory_set
@ -227,7 +256,7 @@ impl MapArea {
start_va: VirtAddr,
end_va: VirtAddr,
map_type: MapType,
map_perm: MapPermission
map_perm: MapPermission,
) -> Self {
let start_vpn: VirtPageNum = start_va.floor();
let end_vpn: VirtPageNum = end_va.ceil();
@ -326,15 +355,27 @@ pub fn remap_test() {
let mid_rodata: VirtAddr = ((srodata as usize + erodata as usize) / 2).into();
let mid_data: VirtAddr = ((sdata as usize + edata as usize) / 2).into();
assert_eq!(
kernel_space.page_table.translate(mid_text.floor()).unwrap().writable(),
kernel_space
.page_table
.translate(mid_text.floor())
.unwrap()
.writable(),
false
);
assert_eq!(
kernel_space.page_table.translate(mid_rodata.floor()).unwrap().writable(),
kernel_space
.page_table
.translate(mid_rodata.floor())
.unwrap()
.writable(),
false,
);
assert_eq!(
kernel_space.page_table.translate(mid_data.floor()).unwrap().executable(),
kernel_space
.page_table
.translate(mid_data.floor())
.unwrap()
.executable(),
false,
);
println!("remap_test passed!");

@ -1,28 +1,22 @@
mod heap_allocator;
mod address;
mod frame_allocator;
mod page_table;
mod heap_allocator;
mod memory_set;
mod page_table;
use page_table::PTEFlags;
use address::VPNRange;
pub use address::{PhysAddr, VirtAddr, PhysPageNum, VirtPageNum, StepByOne};
pub use frame_allocator::{FrameTracker, frame_alloc, frame_dealloc,};
pub use address::{PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
pub use frame_allocator::{frame_alloc, frame_dealloc, FrameTracker};
pub use memory_set::remap_test;
pub use memory_set::{kernel_token, MapPermission, MemorySet, KERNEL_SPACE};
use page_table::PTEFlags;
pub use page_table::{
PageTable,
PageTableEntry,
translated_byte_buffer,
translated_str,
translated_ref,
translated_refmut,
UserBuffer,
UserBufferIterator,
translated_byte_buffer, translated_ref, translated_refmut, translated_str, PageTable,
PageTableEntry, UserBuffer, UserBufferIterator,
};
pub use memory_set::{MemorySet, KERNEL_SPACE, MapPermission, kernel_token};
pub use memory_set::remap_test;
pub fn init() {
heap_allocator::init_heap();
frame_allocator::init_frame_allocator();
KERNEL_SPACE.exclusive_access().activate();
}
}

@ -1,15 +1,7 @@
use super::{
frame_alloc,
PhysPageNum,
FrameTracker,
VirtPageNum,
VirtAddr,
PhysAddr,
StepByOne
};
use alloc::vec::Vec;
use alloc::vec;
use super::{frame_alloc, FrameTracker, PhysAddr, PhysPageNum, StepByOne, VirtAddr, VirtPageNum};
use alloc::string::String;
use alloc::vec;
use alloc::vec::Vec;
use bitflags::*;
bitflags! {
@ -38,9 +30,7 @@ impl PageTableEntry {
}
}
pub fn empty() -> Self {
PageTableEntry {
bits: 0,
}
PageTableEntry { bits: 0 }
}
pub fn ppn(&self) -> PhysPageNum {
(self.bits >> 10 & ((1usize << 44) - 1)).into()
@ -132,17 +122,15 @@ impl PageTable {
*pte = PageTableEntry::empty();
}
pub fn translate(&self, vpn: VirtPageNum) -> Option<PageTableEntry> {
self.find_pte(vpn)
.map(|pte| {pte.clone()})
self.find_pte(vpn).map(|pte| pte.clone())
}
pub fn translate_va(&self, va: VirtAddr) -> Option<PhysAddr> {
self.find_pte(va.clone().floor())
.map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
self.find_pte(va.clone().floor()).map(|pte| {
let aligned_pa: PhysAddr = pte.ppn().into();
let offset = va.page_offset();
let aligned_pa_usize: usize = aligned_pa.into();
(aligned_pa_usize + offset).into()
})
}
pub fn token(&self) -> usize {
8usize << 60 | self.root_ppn.0
@ -157,10 +145,7 @@ pub fn translated_byte_buffer(token: usize, ptr: *const u8, len: usize) -> Vec<&
while start < end {
let start_va = VirtAddr::from(start);
let mut vpn = start_va.floor();
let ppn = page_table
.translate(vpn)
.unwrap()
.ppn();
let ppn = page_table.translate(vpn).unwrap().ppn();
vpn.step();
let mut end_va: VirtAddr = vpn.into();
end_va = end_va.min(VirtAddr::from(end));
@ -180,7 +165,10 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
let mut string = String::new();
let mut va = ptr as usize;
loop {
let ch: u8 = *(page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut());
let ch: u8 = *(page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut());
if ch == 0 {
break;
}
@ -192,13 +180,19 @@ pub fn translated_str(token: usize, ptr: *const u8) -> String {
pub fn translated_ref<T>(token: usize, ptr: *const T) -> &'static T {
let page_table = PageTable::from_token(token);
page_table.translate_va(VirtAddr::from(ptr as usize)).unwrap().get_ref()
page_table
.translate_va(VirtAddr::from(ptr as usize))
.unwrap()
.get_ref()
}
pub fn translated_refmut<T>(token: usize, ptr: *mut T) -> &'static mut T {
let page_table = PageTable::from_token(token);
let va = ptr as usize;
page_table.translate_va(VirtAddr::from(va)).unwrap().get_mut()
page_table
.translate_va(VirtAddr::from(va))
.unwrap()
.get_mut()
}
pub struct UserBuffer {

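PageTableEntry::ppn and PageTable::token above both lean on Sv39 bit layouts: a PTE keeps its flags in the low 10 bits with a 44-bit physical page number above them, and the satp token places MODE = 8 (Sv39) in bits 63..60 over the root PPN. The field positions in this illustration come from the RISC-V Sv39 specification rather than from the diff:

// Sv39 page-table entry: bits 9..0 are flags (V/R/W/X/U/G/A/D and RSW),
// bits 53..10 hold the 44-bit physical page number.
fn pte_ppn(bits: usize) -> usize {
    (bits >> 10) & ((1usize << 44) - 1)
}

// Sv39 satp value: MODE = 8 in bits 63..60, ASID = 0, root PPN in the low bits,
// matching `8usize << 60 | self.root_ppn.0` in PageTable::token above.
fn satp_token(root_ppn: usize) -> usize {
    8usize << 60 | root_ppn
}

fn main() {
    let flags = 0b1111; // V | R | W | X
    let pte = (0x8_0234usize << 10) | flags;
    assert_eq!(pte_ppn(pte), 0x8_0234);
    assert_eq!(satp_token(0x8_0234) >> 60, 8);
}
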
@ -43,4 +43,3 @@ pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
panic!("It should shutdown!");
}

@ -1,6 +1,6 @@
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::{Mutex, UPSafeCell};
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Condvar {
pub inner: UPSafeCell<CondvarInner>,
@ -13,11 +13,11 @@ pub struct CondvarInner {
impl Condvar {
pub fn new() -> Self {
Self {
inner: unsafe { UPSafeCell::new(
CondvarInner {
inner: unsafe {
UPSafeCell::new(CondvarInner {
wait_queue: VecDeque::new(),
}
)},
})
},
}
}
@ -28,7 +28,7 @@ impl Condvar {
}
}
pub fn wait(&self, mutex:Arc<dyn Mutex>) {
pub fn wait(&self, mutex: Arc<dyn Mutex>) {
mutex.unlock();
let mut inner = self.inner.exclusive_access();
inner.wait_queue.push_back(current_task().unwrap());

@ -1,9 +1,9 @@
mod up;
mod condvar;
mod mutex;
mod semaphore;
mod condvar;
mod up;
pub use up::UPSafeCell;
pub use mutex::{Mutex, MutexSpin, MutexBlocking};
pub use condvar::Condvar;
pub use mutex::{Mutex, MutexBlocking, MutexSpin};
pub use semaphore::Semaphore;
pub use condvar::Condvar;
pub use up::UPSafeCell;

@ -1,8 +1,8 @@
use super::UPSafeCell;
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use crate::task::TaskControlBlock;
use crate::task::{add_task, current_task};
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{block_current_and_run_next, suspend_current_and_run_next};
use alloc::{collections::VecDeque, sync::Arc};
pub trait Mutex: Sync + Send {
fn lock(&self);
@ -77,7 +77,7 @@ impl Mutex for MutexBlocking {
}
fn unlock(&self) {
let mut mutex_inner = self.inner.exclusive_access();
let mut mutex_inner = self.inner.exclusive_access();
assert_eq!(mutex_inner.locked, true);
if let Some(waking_task) = mutex_inner.wait_queue.pop_front() {
add_task(waking_task);

@ -1,6 +1,6 @@
use alloc::{sync::Arc, collections::VecDeque};
use crate::task::{add_task, TaskControlBlock, current_task, block_current_and_run_next};
use crate::sync::UPSafeCell;
use crate::task::{add_task, block_current_and_run_next, current_task, TaskControlBlock};
use alloc::{collections::VecDeque, sync::Arc};
pub struct Semaphore {
pub inner: UPSafeCell<SemaphoreInner>,
@ -14,12 +14,12 @@ pub struct SemaphoreInner {
impl Semaphore {
pub fn new(res_count: usize) -> Self {
Self {
inner: unsafe { UPSafeCell::new(
SemaphoreInner {
inner: unsafe {
UPSafeCell::new(SemaphoreInner {
count: res_count as isize,
wait_queue: VecDeque::new(),
}
)},
})
},
}
}

@ -18,10 +18,12 @@ impl<T> UPSafeCell<T> {
/// User is responsible to guarantee that inner struct is only used in
/// uniprocessor.
pub unsafe fn new(value: T) -> Self {
Self { inner: RefCell::new(value) }
Self {
inner: RefCell::new(value),
}
}
/// Panic if the data has been borrowed.
pub fn exclusive_access(&self) -> RefMut<'_, T> {
self.inner.borrow_mut()
}
}
}

@ -1,11 +1,6 @@
use crate::mm::{
UserBuffer,
translated_byte_buffer,
translated_refmut,
translated_str,
};
use crate::task::{current_user_token, current_process};
use crate::fs::{make_pipe, OpenFlags, open_file};
use crate::fs::{make_pipe, open_file, OpenFlags};
use crate::mm::{translated_byte_buffer, translated_refmut, translated_str, UserBuffer};
use crate::task::{current_process, current_user_token};
use alloc::sync::Arc;
pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
@ -22,9 +17,7 @@ pub fn sys_write(fd: usize, buf: *const u8, len: usize) -> isize {
let file = file.clone();
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.write(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.write(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -44,9 +37,7 @@ pub fn sys_read(fd: usize, buf: *const u8, len: usize) -> isize {
}
// release current task TCB manually to avoid multi-borrow
drop(inner);
file.read(
UserBuffer::new(translated_byte_buffer(token, buf, len))
) as isize
file.read(UserBuffer::new(translated_byte_buffer(token, buf, len))) as isize
} else {
-1
}
@ -56,10 +47,7 @@ pub fn sys_open(path: *const u8, flags: u32) -> isize {
let process = current_process();
let token = current_user_token();
let path = translated_str(token, path);
if let Some(inode) = open_file(
path.as_str(),
OpenFlags::from_bits(flags).unwrap()
) {
if let Some(inode) = open_file(path.as_str(), OpenFlags::from_bits(flags).unwrap()) {
let mut inner = process.inner_exclusive_access();
let fd = inner.alloc_fd();
inner.fd_table[fd] = Some(inode);

@ -27,17 +27,17 @@ const SYSCALL_CONDVAR_WAIT: usize = 1032;
mod fs;
mod process;
mod thread;
mod sync;
mod thread;
use fs::*;
use process::*;
use thread::*;
use sync::*;
use thread::*;
pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
match syscall_id {
SYSCALL_DUP=> sys_dup(args[0]),
SYSCALL_DUP => sys_dup(args[0]),
SYSCALL_OPEN => sys_open(args[0] as *const u8, args[1] as u32),
SYSCALL_CLOSE => sys_close(args[0]),
SYSCALL_PIPE => sys_pipe(args[0] as *mut usize),
@ -66,4 +66,3 @@ pub fn syscall(syscall_id: usize, args: [usize; 3]) -> isize {
_ => panic!("Unsupported syscall_id: {}", syscall_id),
}
}

@ -1,23 +1,13 @@
use crate::fs::{open_file, OpenFlags};
use crate::mm::{translated_ref, translated_refmut, translated_str};
use crate::task::{
current_process, current_task, current_user_token, exit_current_and_run_next,
suspend_current_and_run_next,
exit_current_and_run_next,
current_task,
current_process,
current_user_token,
};
use crate::timer::get_time_ms;
use crate::mm::{
translated_str,
translated_refmut,
translated_ref,
};
use crate::fs::{
open_file,
OpenFlags,
};
use alloc::string::String;
use alloc::sync::Arc;
use alloc::vec::Vec;
use alloc::string::String;
pub fn sys_exit(exit_code: i32) -> ! {
exit_current_and_run_next(exit_code);
@ -61,7 +51,9 @@ pub fn sys_exec(path: *const u8, mut args: *const usize) -> isize {
break;
}
args_vec.push(translated_str(token, arg_str_ptr as *const u8));
unsafe { args = args.add(1); }
unsafe {
args = args.add(1);
}
}
if let Some(app_inode) = open_file(path.as_str(), OpenFlags::RDONLY) {
let all_data = app_inode.read_all();
@ -82,21 +74,20 @@ pub fn sys_waitpid(pid: isize, exit_code_ptr: *mut i32) -> isize {
// find a child process
let mut inner = process.inner_exclusive_access();
if inner.children
if inner
.children
.iter()
.find(|p| {pid == -1 || pid as usize == p.getpid()})
.is_none() {
.find(|p| pid == -1 || pid as usize == p.getpid())
.is_none()
{
return -1;
// ---- release current PCB
}
let pair = inner.children
.iter()
.enumerate()
.find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
let pair = inner.children.iter().enumerate().find(|(_, p)| {
// ++++ temporarily access child PCB exclusively
p.inner_exclusive_access().is_zombie && (pid == -1 || pid as usize == p.getpid())
// ++++ release child PCB
});
if let Some((idx, _)) = pair {
let child = inner.children.remove(idx);
// confirm that child will be deallocated after being removed from children list

@ -1,6 +1,6 @@
use crate::task::{current_task, current_process, block_current_and_run_next};
use crate::sync::{Mutex, MutexSpin, MutexBlocking, Semaphore, Condvar};
use crate::timer::{get_time_ms, add_timer};
use crate::sync::{Condvar, Mutex, MutexBlocking, MutexSpin, Semaphore};
use crate::task::{block_current_and_run_next, current_process, current_task};
use crate::timer::{add_timer, get_time_ms};
use alloc::sync::Arc;
pub fn sys_sleep(ms: usize) -> isize {
@ -24,7 +24,8 @@ pub fn sys_mutex_create(blocking: bool) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
.map(|(id, _)| id)
{
process_inner.mutex_list[id] = mutex;
id as isize
} else {
@ -61,11 +62,14 @@ pub fn sys_semaphore_create(res_count: usize) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
.map(|(id, _)| id)
{
process_inner.semaphore_list[id] = Some(Arc::new(Semaphore::new(res_count)));
id
} else {
process_inner.semaphore_list.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner
.semaphore_list
.push(Some(Arc::new(Semaphore::new(res_count))));
process_inner.semaphore_list.len() - 1
};
id as isize
@ -89,7 +93,6 @@ pub fn sys_semaphore_down(sem_id: usize) -> isize {
0
}
pub fn sys_condvar_create(_arg: usize) -> isize {
let process = current_process();
let mut process_inner = process.inner_exclusive_access();
@ -98,11 +101,14 @@ pub fn sys_condvar_create(_arg: usize) -> isize {
.iter()
.enumerate()
.find(|(_, item)| item.is_none())
.map(|(id, _)| id) {
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
.map(|(id, _)| id)
{
process_inner.condvar_list[id] = Some(Arc::new(Condvar::new()));
id
} else {
process_inner.condvar_list.push(Some(Arc::new(Condvar::new())));
process_inner
.condvar_list
.push(Some(Arc::new(Condvar::new())));
process_inner.condvar_list.len() - 1
};
id as isize
@ -125,4 +131,4 @@ pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
drop(process_inner);
condvar.wait(mutex);
0
}
}

@ -1,5 +1,9 @@
use crate::{
mm::kernel_token,
task::{add_task, current_task, TaskControlBlock},
trap::{trap_handler, TrapContext},
};
use alloc::sync::Arc;
use crate::{mm::kernel_token, task::{TaskControlBlock, add_task, current_task}, trap::{TrapContext, trap_handler}};
pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
let task = current_task().unwrap();
@ -7,7 +11,11 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
// create a new thread
let new_task = Arc::new(TaskControlBlock::new(
Arc::clone(&process),
task.inner_exclusive_access().res.as_ref().unwrap().ustack_base,
task.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base,
true,
));
// add new task to scheduler
@ -35,7 +43,13 @@ pub fn sys_thread_create(entry: usize, arg: usize) -> isize {
}
pub fn sys_gettid() -> isize {
current_task().unwrap().inner_exclusive_access().res.as_ref().unwrap().tid as isize
current_task()
.unwrap()
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.tid as isize
}
/// thread does not exist, return -1

@ -23,4 +23,3 @@ impl TaskContext {
}
}
}

@ -1,9 +1,12 @@
use alloc::{vec::Vec, sync::{Arc, Weak}};
use lazy_static::*;
use crate::sync::UPSafeCell;
use crate::mm::{KERNEL_SPACE, MapPermission, PhysPageNum, VirtAddr};
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use super::ProcessControlBlock;
use crate::config::{KERNEL_STACK_SIZE, PAGE_SIZE, TRAMPOLINE, TRAP_CONTEXT_BASE, USER_STACK_SIZE};
use crate::mm::{MapPermission, PhysPageNum, VirtAddr, KERNEL_SPACE};
use crate::sync::UPSafeCell;
use alloc::{
sync::{Arc, Weak},
vec::Vec,
};
use lazy_static::*;
pub struct RecycleAllocator {
current: usize,
@ -29,20 +32,18 @@ impl RecycleAllocator {
assert!(id < self.current);
assert!(
self.recycled.iter().find(|i| **i == id).is_none(),
"id {} has been deallocated!", id
"id {} has been deallocated!",
id
);
self.recycled.push(id);
}
}
lazy_static! {
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> = unsafe {
UPSafeCell::new(RecycleAllocator::new())
};
static ref PID_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
static ref KSTACK_ALLOCATOR: UPSafeCell<RecycleAllocator> =
unsafe { UPSafeCell::new(RecycleAllocator::new()) };
}
pub struct PidHandle(pub usize);
@ -69,13 +70,11 @@ pub struct KernelStack(pub usize);
pub fn kstack_alloc() -> KernelStack {
let kstack_id = KSTACK_ALLOCATOR.exclusive_access().alloc();
let (kstack_bottom, kstack_top) = kernel_stack_position(kstack_id);
KERNEL_SPACE
.exclusive_access()
.insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KERNEL_SPACE.exclusive_access().insert_framed_area(
kstack_bottom.into(),
kstack_top.into(),
MapPermission::R | MapPermission::W,
);
KernelStack(kstack_id)
}
@ -91,11 +90,15 @@ impl Drop for KernelStack {
impl KernelStack {
#[allow(unused)]
pub fn push_on_top<T>(&self, value: T) -> *mut T where
T: Sized, {
pub fn push_on_top<T>(&self, value: T) -> *mut T
where
T: Sized,
{
let kernel_stack_top = self.get_top();
let ptr_mut = (kernel_stack_top - core::mem::size_of::<T>()) as *mut T;
unsafe { *ptr_mut = value; }
unsafe {
*ptr_mut = value;
}
ptr_mut
}
pub fn get_top(&self) -> usize {
@ -142,23 +145,19 @@ impl TaskUserRes {
// alloc user stack
let ustack_bottom = ustack_bottom_from_tid(self.ustack_base, self.tid);
let ustack_top = ustack_bottom + USER_STACK_SIZE;
process_inner
.memory_set
.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
process_inner.memory_set.insert_framed_area(
ustack_bottom.into(),
ustack_top.into(),
MapPermission::R | MapPermission::W | MapPermission::U,
);
// alloc trap_cx
let trap_cx_bottom = trap_cx_bottom_from_tid(self.tid);
let trap_cx_top = trap_cx_bottom + PAGE_SIZE;
process_inner
.memory_set
.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
process_inner.memory_set.insert_framed_area(
trap_cx_bottom.into(),
trap_cx_top.into(),
MapPermission::R | MapPermission::W,
);
}
fn dealloc_user_res(&self) {
@ -167,10 +166,14 @@ impl TaskUserRes {
let mut process_inner = process.inner_exclusive_access();
// dealloc ustack manually
let ustack_bottom_va: VirtAddr = ustack_bottom_from_tid(self.ustack_base, self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(ustack_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(ustack_bottom_va.into());
// dealloc trap_cx manually
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.remove_area_with_start_vpn(trap_cx_bottom_va.into());
process_inner
.memory_set
.remove_area_with_start_vpn(trap_cx_bottom_va.into());
}
#[allow(unused)]
@ -197,12 +200,18 @@ impl TaskUserRes {
let process = self.process.upgrade().unwrap();
let process_inner = process.inner_exclusive_access();
let trap_cx_bottom_va: VirtAddr = trap_cx_bottom_from_tid(self.tid).into();
process_inner.memory_set.translate(trap_cx_bottom_va.into()).unwrap().ppn()
process_inner
.memory_set
.translate(trap_cx_bottom_va.into())
.unwrap()
.ppn()
}
pub fn ustack_base(&self) -> usize { self.ustack_base }
pub fn ustack_base(&self) -> usize {
self.ustack_base
}
pub fn ustack_top(&self) -> usize {
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
ustack_bottom_from_tid(self.ustack_base, self.tid) + USER_STACK_SIZE
}
}
@ -212,4 +221,3 @@ impl Drop for TaskUserRes {
self.dealloc_user_res();
}
}

@ -1,5 +1,5 @@
use crate::sync::UPSafeCell;
use super::TaskControlBlock;
use crate::sync::UPSafeCell;
use alloc::collections::VecDeque;
use alloc::sync::Arc;
use lazy_static::*;
@ -11,7 +11,9 @@ pub struct TaskManager {
/// A simple FIFO scheduler.
impl TaskManager {
pub fn new() -> Self {
Self { ready_queue: VecDeque::new(), }
Self {
ready_queue: VecDeque::new(),
}
}
pub fn add(&mut self, task: Arc<TaskControlBlock>) {
self.ready_queue.push_back(task);
@ -22,9 +24,8 @@ impl TaskManager {
}
lazy_static! {
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> = unsafe {
UPSafeCell::new(TaskManager::new())
};
pub static ref TASK_MANAGER: UPSafeCell<TaskManager> =
unsafe { UPSafeCell::new(TaskManager::new()) };
}
pub fn add_task(task: Arc<TaskControlBlock>) {

@ -1,38 +1,26 @@
mod context;
mod switch;
mod task;
mod manager;
mod processor;
mod id;
mod manager;
mod process;
mod processor;
mod switch;
mod task;
use crate::fs::{open_file, OpenFlags};
use switch::__switch;
use alloc::sync::Arc;
use manager::fetch_task;
use lazy_static::*;
use manager::fetch_task;
use process::ProcessControlBlock;
use switch::__switch;
pub use context::TaskContext;
pub use id::{kstack_alloc, pid_alloc, KernelStack, PidHandle};
pub use manager::add_task;
pub use processor::{
run_tasks,
current_task,
current_process,
current_user_token,
current_trap_cx_user_va,
current_trap_cx,
current_kstack_top,
take_current_task,
schedule,
current_kstack_top, current_process, current_task, current_trap_cx, current_trap_cx_user_va,
current_user_token, run_tasks, schedule, take_current_task,
};
pub use task::{TaskControlBlock, TaskStatus};
pub use manager::add_task;
pub use id::{
PidHandle,
pid_alloc,
KernelStack,
kstack_alloc,
};
pub fn suspend_current_and_run_next() {
// There must be an application running.
@ -86,7 +74,7 @@ pub fn exit_current_and_run_next(exit_code: i32) {
// move all child processes under init process
let mut initproc_inner = INITPROC.inner_exclusive_access();
for child in process_inner.children.iter() {
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
child.inner_exclusive_access().parent = Some(Arc::downgrade(&INITPROC));
initproc_inner.children.push(child.clone());
}
}
@ -103,6 +91,8 @@ pub fn exit_current_and_run_next(exit_code: i32) {
process_inner.children.clear();
// deallocate other data in user space i.e. program code/data section
process_inner.memory_set.recycle_data_pages();
// drop file descriptors
process_inner.fd_table.clear();
}
drop(process);
// we do not have to save task context

@ -1,20 +1,16 @@
use crate::mm::{
MemorySet,
KERNEL_SPACE,
translated_refmut,
};
use crate::trap::{TrapContext, trap_handler};
use crate::sync::{UPSafeCell, Mutex, Semaphore, Condvar};
use core::cell::RefMut;
use super::add_task;
use super::id::RecycleAllocator;
use super::TaskControlBlock;
use super::{PidHandle, pid_alloc};
use super::add_task;
use alloc::sync::{Weak, Arc};
use super::{pid_alloc, PidHandle};
use crate::fs::{File, Stdin, Stdout};
use crate::mm::{translated_refmut, MemorySet, KERNEL_SPACE};
use crate::sync::{Condvar, Mutex, Semaphore, UPSafeCell};
use crate::trap::{trap_handler, TrapContext};
use alloc::string::String;
use alloc::sync::{Arc, Weak};
use alloc::vec;
use alloc::vec::Vec;
use alloc::string::String;
use crate::fs::{File, Stdin, Stdout};
use core::cell::RefMut;
pub struct ProcessControlBlock {
// immutable
@ -34,7 +30,7 @@ pub struct ProcessControlBlockInner {
pub task_res_allocator: RecycleAllocator,
pub mutex_list: Vec<Option<Arc<dyn Mutex>>>,
pub semaphore_list: Vec<Option<Arc<Semaphore>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
pub condvar_list: Vec<Option<Arc<Condvar>>>,
}
impl ProcessControlBlockInner {
@ -44,8 +40,7 @@ impl ProcessControlBlockInner {
}
pub fn alloc_fd(&mut self) -> usize {
if let Some(fd) = (0..self.fd_table.len())
.find(|fd| self.fd_table[*fd].is_none()) {
if let Some(fd) = (0..self.fd_table.len()).find(|fd| self.fd_table[*fd].is_none()) {
fd
} else {
self.fd_table.push(None);
@ -57,7 +52,7 @@ impl ProcessControlBlockInner {
self.task_res_allocator.alloc()
}
pub fn dealloc_tid(&mut self, tid: usize){
pub fn dealloc_tid(&mut self, tid: usize) {
self.task_res_allocator.dealloc(tid)
}
@ -82,26 +77,28 @@ impl ProcessControlBlock {
let pid_handle = pid_alloc();
let process = Arc::new(Self {
pid: pid_handle,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: None,
children: Vec::new(),
exit_code: 0,
fd_table: vec![
// 0 -> stdin
Some(Arc::new(Stdin)),
// 1 -> stdout
Some(Arc::new(Stdout)),
// 2 -> stderr
Some(Arc::new(Stdout)),
],
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// create a main thread, we should allocate ustack and trap_cx here
let task = Arc::new(TaskControlBlock::new(
@ -154,7 +151,7 @@ impl ProcessControlBlock {
.map(|arg| {
translated_refmut(
new_token,
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize
(argv_base + arg * core::mem::size_of::<usize>()) as *mut usize,
)
})
.collect();
@ -201,29 +198,37 @@ impl ProcessControlBlock {
new_fd_table.push(None);
}
}
// create child process pcb
// create child process pcb
let child = Arc::new(Self {
pid,
inner: unsafe { UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})}
inner: unsafe {
UPSafeCell::new(ProcessControlBlockInner {
is_zombie: false,
memory_set,
parent: Some(Arc::downgrade(self)),
children: Vec::new(),
exit_code: 0,
fd_table: new_fd_table,
tasks: Vec::new(),
task_res_allocator: RecycleAllocator::new(),
mutex_list: Vec::new(),
semaphore_list: Vec::new(),
condvar_list: Vec::new(),
})
},
});
// add child
parent.children.push(Arc::clone(&child));
// create main thread of child process
let task = Arc::new(TaskControlBlock::new(
Arc::clone(&child),
parent.get_task(0).inner_exclusive_access().res.as_ref().unwrap().ustack_base(),
parent
.get_task(0)
.inner_exclusive_access()
.res
.as_ref()
.unwrap()
.ustack_base(),
// here we do not allocate trap_cx or ustack again
// but mention that we allocate a new kstack here
false,
@ -246,4 +251,3 @@ impl ProcessControlBlock {
self.pid.0
}
}

@ -1,10 +1,10 @@
use super::{TaskContext, TaskControlBlock, ProcessControlBlock};
use alloc::sync::Arc;
use lazy_static::*;
use super::{fetch_task, TaskStatus};
use super::__switch;
use crate::trap::TrapContext;
use super::{fetch_task, TaskStatus};
use super::{ProcessControlBlock, TaskContext, TaskControlBlock};
use crate::sync::UPSafeCell;
use crate::trap::TrapContext;
use alloc::sync::Arc;
use lazy_static::*;
pub struct Processor {
current: Option<Arc<TaskControlBlock>>,
@ -30,9 +30,7 @@ impl Processor {
}
lazy_static! {
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe {
UPSafeCell::new(Processor::new())
};
pub static ref PROCESSOR: UPSafeCell<Processor> = unsafe { UPSafeCell::new(Processor::new()) };
}
pub fn run_tasks() {
@ -50,13 +48,10 @@ pub fn run_tasks() {
// release processor manually
drop(processor);
unsafe {
__switch(
idle_task_cx_ptr,
next_task_cx_ptr,
);
__switch(idle_task_cx_ptr, next_task_cx_ptr);
}
} else {
println!("no tasks available in run_tasks");
println!("no tasks available in run_tasks");
}
}
}
@ -80,7 +75,10 @@ pub fn current_user_token() -> usize {
}
pub fn current_trap_cx() -> &'static mut TrapContext {
current_task().unwrap().inner_exclusive_access().get_trap_cx()
current_task()
.unwrap()
.inner_exclusive_access()
.get_trap_cx()
}
pub fn current_trap_cx_user_va() -> usize {
@ -94,10 +92,7 @@ pub fn current_trap_cx_user_va() -> usize {
}
pub fn current_kstack_top() -> usize {
current_task()
.unwrap()
.kstack
.get_top()
current_task().unwrap().kstack.get_top()
}
pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
@ -105,9 +100,6 @@ pub fn schedule(switched_task_cx_ptr: *mut TaskContext) {
let idle_task_cx_ptr = processor.get_idle_task_cx_ptr();
drop(processor);
unsafe {
__switch(
switched_task_cx_ptr,
idle_task_cx_ptr,
);
__switch(switched_task_cx_ptr, idle_task_cx_ptr);
}
}

@ -4,8 +4,5 @@ use core::arch::global_asm;
global_asm!(include_str!("switch.S"));
extern "C" {
pub fn __switch(
current_task_cx_ptr: *mut TaskContext,
next_task_cx_ptr: *const TaskContext
);
pub fn __switch(current_task_cx_ptr: *mut TaskContext, next_task_cx_ptr: *const TaskContext);
}

@ -1,8 +1,8 @@
use alloc::sync::{Arc, Weak};
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use crate::trap::TrapContext;
use super::id::TaskUserRes;
use super::{KernelStack, ProcessControlBlock, TaskContext, kstack_alloc};
use super::{kstack_alloc, KernelStack, ProcessControlBlock, TaskContext};
use crate::trap::TrapContext;
use crate::{mm::PhysPageNum, sync::UPSafeCell};
use alloc::sync::{Arc, Weak};
use core::cell::RefMut;
pub struct TaskControlBlock {
@ -37,7 +37,7 @@ impl TaskControlBlockInner {
pub fn get_trap_cx(&self) -> &'static mut TrapContext {
self.trap_cx_ppn.get_mut()
}
#[allow(unused)]
fn get_status(&self) -> TaskStatus {
self.task_status
@ -48,7 +48,7 @@ impl TaskControlBlock {
pub fn new(
process: Arc<ProcessControlBlock>,
ustack_base: usize,
alloc_user_res: bool
alloc_user_res: bool,
) -> Self {
let res = TaskUserRes::new(Arc::clone(&process), ustack_base, alloc_user_res);
let trap_cx_ppn = res.trap_cx_ppn();
@ -57,15 +57,15 @@ impl TaskControlBlock {
Self {
process: Arc::downgrade(&process),
kstack,
inner: unsafe { UPSafeCell::new(
TaskControlBlockInner {
inner: unsafe {
UPSafeCell::new(TaskControlBlockInner {
res: Some(res),
trap_cx_ppn,
task_cx: TaskContext::goto_trap_return(kstack_top),
task_status: TaskStatus::Ready,
exit_code: None,
}
)},
})
},
}
}
}

@ -1,13 +1,13 @@
use core::cmp::Ordering;
use riscv::register::time;
use crate::sbi::set_timer;
use crate::config::CLOCK_FREQ;
use crate::task::{TaskControlBlock, add_task};
use crate::sbi::set_timer;
use crate::sync::UPSafeCell;
use crate::task::{add_task, TaskControlBlock};
use alloc::collections::BinaryHeap;
use alloc::sync::Arc;
use lazy_static::*;
use riscv::register::time;
const TICKS_PER_SEC: usize = 100;
const MSEC_PER_SEC: usize = 1000;
@ -39,28 +39,24 @@ impl PartialOrd for TimerCondVar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
let a = -(self.expire_ms as isize);
let b = -(other.expire_ms as isize);
Some(a.cmp(&b))
Some(a.cmp(&b))
}
}
impl Ord for TimerCondVar {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
self.partial_cmp(other).unwrap()
}
}
lazy_static! {
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> = unsafe { UPSafeCell::new(
BinaryHeap::<TimerCondVar>::new()
)};
static ref TIMERS: UPSafeCell<BinaryHeap<TimerCondVar>> =
unsafe { UPSafeCell::new(BinaryHeap::<TimerCondVar>::new()) };
}
pub fn add_timer(expire_ms: usize, task: Arc<TaskControlBlock>) {
let mut timers = TIMERS.exclusive_access();
timers.push(TimerCondVar {
expire_ms,
task,
});
timers.push(TimerCondVar { expire_ms, task });
}
pub fn check_timer() {
@ -68,9 +64,11 @@ pub fn check_timer() {
let mut timers = TIMERS.exclusive_access();
while let Some(timer) = timers.peek() {
if timer.expire_ms <= current_ms {
add_task(Arc::clone(&timer.task));
add_task(Arc::clone(&timer.task));
drop(timer);
timers.pop();
} else { break; }
} else {
break;
}
}
}

@ -1,4 +1,4 @@
use riscv::register::sstatus::{Sstatus, self, SPP};
use riscv::register::sstatus::{self, Sstatus, SPP};
#[repr(C)]
#[derive(Debug)]
@ -12,7 +12,9 @@ pub struct TrapContext {
}
impl TrapContext {
pub fn set_sp(&mut self, sp: usize) { self.x[2] = sp; }
pub fn set_sp(&mut self, sp: usize) {
self.x[2] = sp;
}
pub fn app_init_context(
entry: usize,
sp: usize,

@ -1,28 +1,18 @@
mod context;
use riscv::register::{
mtvec::TrapMode,
stvec,
scause::{
self,
Trap,
Exception,
Interrupt,
},
stval,
sie,
};
use crate::config::TRAMPOLINE;
use crate::syscall::syscall;
use crate::task::{
exit_current_and_run_next,
current_trap_cx, current_trap_cx_user_va, current_user_token, exit_current_and_run_next,
suspend_current_and_run_next,
current_user_token,
current_trap_cx,
current_trap_cx_user_va,
};
use crate::timer::{set_next_trigger, check_timer};
use crate::config::TRAMPOLINE;
use core::arch::{global_asm, asm};
use crate::timer::{check_timer, set_next_trigger};
use core::arch::{asm, global_asm};
use riscv::register::{
mtvec::TrapMode,
scause::{self, Exception, Interrupt, Trap},
sie, stval, stvec,
};
global_asm!(include_str!("trap.S"));
@ -43,7 +33,9 @@ fn set_user_trap_entry() {
}
pub fn enable_timer_interrupt() {
unsafe { sie::set_stimer(); }
unsafe {
sie::set_stimer();
}
}
#[no_mangle]
@ -62,12 +54,12 @@ pub fn trap_handler() -> ! {
cx = current_trap_cx();
cx.x[10] = result as usize;
}
Trap::Exception(Exception::StoreFault) |
Trap::Exception(Exception::StorePageFault) |
Trap::Exception(Exception::InstructionFault) |
Trap::Exception(Exception::InstructionPageFault) |
Trap::Exception(Exception::LoadFault) |
Trap::Exception(Exception::LoadPageFault) => {
Trap::Exception(Exception::StoreFault)
| Trap::Exception(Exception::StorePageFault)
| Trap::Exception(Exception::InstructionFault)
| Trap::Exception(Exception::InstructionPageFault)
| Trap::Exception(Exception::LoadFault)
| Trap::Exception(Exception::LoadPageFault) => {
println!(
"[kernel] {:?} in application, bad addr = {:#x}, bad instruction = {:#x}, kernel killed it.",
scause.cause(),
@ -88,7 +80,11 @@ pub fn trap_handler() -> ! {
suspend_current_and_run_next();
}
_ => {
panic!("Unsupported trap {:?}, stval = {:#x}!", scause.cause(), stval);
panic!(
"Unsupported trap {:?}, stval = {:#x}!",
scause.cause(),
stval
);
}
}
trap_return();

@ -5,12 +5,7 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{
open,
OpenFlags,
close,
read,
};
use user_lib::{close, open, read, OpenFlags};
#[no_mangle]
pub fn main(argc: usize, argv: &[&str]) -> i32 {
@ -23,7 +18,9 @@ pub fn main(argc: usize, argv: &[&str]) -> i32 {
let mut buf = [0u8; 16];
loop {
let size = read(fd, &mut buf) as usize;
if size == 0 { break; }
if size == 0 {
break;
}
println!("{}", core::str::from_utf8(&buf[..size]).unwrap());
}
close(fd);

@ -3,7 +3,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, yield_, waitpid, exit, wait};
use user_lib::{exit, fork, wait, waitpid, yield_};
const MAGIC: i32 = -0x10384;
@ -13,7 +13,9 @@ pub fn main() -> i32 {
let pid = fork();
if pid == 0 {
println!("I am the child.");
for _ in 0..7 { yield_(); }
for _ in 0..7 {
yield_();
}
exit(MAGIC);
} else {
println!("I am parent, fork a child pid {}", pid);
@ -26,4 +28,3 @@ pub fn main() -> i32 {
println!("exit pass.");
0
}

@ -41,4 +41,4 @@ pub fn main() -> i32 {
println!("{}", color_text!(text, i));
}
0
}
}

@ -4,13 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{
open,
close,
read,
write,
OpenFlags,
};
use user_lib::{close, open, read, write, OpenFlags};
#[no_mangle]
pub fn main() -> i32 {
@ -29,10 +23,7 @@ pub fn main() -> i32 {
let read_len = read(fd, &mut buffer) as usize;
close(fd);
assert_eq!(
test_str,
core::str::from_utf8(&buffer[..read_len]).unwrap(),
);
assert_eq!(test_str, core::str::from_utf8(&buffer[..read_len]).unwrap(),);
println!("file_test passed!");
0
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, exit};
use user_lib::{exit, fork, wait};
const MAX_CHILD: usize = 30;

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, getpid, exit, sleep, get_time};
use user_lib::{exit, fork, get_time, getpid, sleep, wait};
static NUM: usize = 30;
@ -14,7 +14,8 @@ pub fn main() -> i32 {
let pid = fork();
if pid == 0 {
let current_time = get_time();
let sleep_length = (current_time as i32 as isize) * (current_time as i32 as isize) % 1000 + 1000;
let sleep_length =
(current_time as i32 as isize) * (current_time as i32 as isize) % 1000 + 1000;
println!("pid {} sleep for {} ms", getpid(), sleep_length);
sleep(sleep_length as usize);
println!("pid {} OK!", getpid());
@ -30,4 +31,4 @@ pub fn main() -> i32 {
assert!(wait(&mut exit_code) < 0);
println!("forktest2 test passed!");
0
}
}

@ -25,4 +25,4 @@ pub fn main() -> i32 {
println!("child process pid = {}, exit code = {}", pid, exit_code);
0
}
}
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{sleep, getpid, fork, exit, yield_};
use user_lib::{exit, fork, getpid, sleep, yield_};
const DEPTH: usize = 4;

@ -8,4 +8,4 @@ extern crate user_lib;
pub fn main() -> i32 {
println!("Hello world from user mode program!");
0
}
}

@ -4,13 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{
OpenFlags,
open,
close,
write,
get_time,
};
use user_lib::{close, get_time, open, write, OpenFlags};
#[no_mangle]
pub fn main() -> i32 {
@ -25,12 +19,15 @@ pub fn main() -> i32 {
let f = f as usize;
let start = get_time();
let size_mb = 1usize;
for _ in 0..1024*size_mb {
for _ in 0..1024 * size_mb {
write(f, &buffer);
}
close(f);
let time_ms = (get_time() - start) as usize;
let speed_kbs = size_mb * 1000000 / time_ms;
println!("{}MiB written, time cost = {}ms, write speed = {}KiB/s", size_mb, time_ms, speed_kbs);
println!(
"{}MiB written, time cost = {}ms, write speed = {}KiB/s",
size_mb, time_ms, speed_kbs
);
0
}

@ -3,12 +3,7 @@
extern crate user_lib;
use user_lib::{
fork,
wait,
exec,
yield_,
};
use user_lib::{exec, fork, wait, yield_};
#[no_mangle]
fn main() -> i32 {

@ -5,7 +5,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, wait, yield_, exit, getpid, get_time};
use user_lib::{exit, fork, get_time, getpid, wait, yield_};
static NUM: usize = 30;
const N: usize = 10;

@ -7,10 +7,10 @@ extern crate user_lib;
extern crate alloc;
use user_lib::{semaphore_create, semaphore_up, semaphore_down};
use user_lib::{thread_create, waittid};
use user_lib::exit;
use alloc::vec::Vec;
use user_lib::exit;
use user_lib::{semaphore_create, semaphore_down, semaphore_up};
use user_lib::{thread_create, waittid};
const SEM_MUTEX: usize = 0;
const SEM_EMPTY: usize = 1;
@ -58,7 +58,10 @@ pub fn main() -> i32 {
let ids: Vec<_> = (0..PRODUCER_COUNT).collect();
let mut threads = Vec::new();
for i in 0..PRODUCER_COUNT {
threads.push(thread_create(producer as usize, &ids.as_slice()[i] as *const _ as usize));
threads.push(thread_create(
producer as usize,
&ids.as_slice()[i] as *const _ as usize,
));
}
threads.push(thread_create(consumer as usize, 0));
// wait for all threads to complete

@ -6,10 +6,10 @@
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use user_lib::{exit, get_time, sleep};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
use user_lib::{thread_create, waittid};
use user_lib::{sleep, exit, get_time};
use alloc::vec::Vec;
const N: usize = 5;
const ROUND: usize = 4;
@ -39,16 +39,24 @@ fn philosopher_dining_problem(id: *const usize) {
let max = left + right - min;
for round in 0..ROUND {
// thinking
unsafe { THINK[id][2 * round] = get_time_u(); }
unsafe {
THINK[id][2 * round] = get_time_u();
}
sleep(ARR[id][2 * round]);
unsafe { THINK[id][2 * round + 1] = get_time_u(); }
unsafe {
THINK[id][2 * round + 1] = get_time_u();
}
// wait for forks
mutex_lock(min);
mutex_lock(max);
// eating
unsafe { EAT[id][2 * round] = get_time_u(); }
unsafe {
EAT[id][2 * round] = get_time_u();
}
sleep(ARR[id][2 * round + 1]);
unsafe { EAT[id][2 * round + 1] = get_time_u(); }
unsafe {
EAT[id][2 * round + 1] = get_time_u();
}
mutex_unlock(max);
mutex_unlock(min);
}
@ -62,7 +70,10 @@ pub fn main() -> i32 {
let start = get_time_u();
for i in 0..N {
assert_eq!(mutex_blocking_create(), i as isize);
v.push(thread_create(philosopher_dining_problem as usize, &ids.as_slice()[i] as *const _ as usize));
v.push(thread_create(
philosopher_dining_problem as usize,
&ids.as_slice()[i] as *const _ as usize,
));
}
for tid in v.iter() {
waittid(*tid as usize);
@ -71,8 +82,8 @@ pub fn main() -> i32 {
println!("time cost = {}", time_cost);
println!("'-' -> THINKING; 'x' -> EATING; ' ' -> WAITING ");
for id in (0..N).into_iter().chain(0..=0) {
print!("#{}:", id);
for j in 0..time_cost/GRAPH_SCALE {
print!("#{}:", id);
for j in 0..time_cost / GRAPH_SCALE {
let current_time = j * GRAPH_SCALE + start;
if (0..ROUND).any(|round| unsafe {
let start_thinking = THINK[id][2 * round];

@ -6,8 +6,8 @@ extern crate user_lib;
extern crate alloc;
use user_lib::{fork, close, pipe, read, write, wait, get_time};
use alloc::format;
use user_lib::{close, fork, get_time, pipe, read, wait, write};
const LENGTH: usize = 3000;
#[no_mangle]
@ -44,7 +44,10 @@ pub fn main() -> i32 {
*ch = get_time() as u8;
}
// send it
assert_eq!(write(down_pipe_fd[1], &random_str) as usize, random_str.len());
assert_eq!(
write(down_pipe_fd[1], &random_str) as usize,
random_str.len()
);
// close write end of down pipe
close(down_pipe_fd[1]);
// calculate sum(parent)
@ -57,9 +60,8 @@ pub fn main() -> i32 {
// check
assert_eq!(
sum,
str::parse::<usize>(
core::str::from_utf8(&child_result[..result_len]).unwrap()
).unwrap()
str::parse::<usize>(core::str::from_utf8(&child_result[..result_len]).unwrap())
.unwrap()
);
let mut _unused: i32 = 0;
wait(&mut _unused);

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, close, pipe, read, write, wait};
use user_lib::{close, fork, pipe, read, wait, write};
static STR: &str = "Hello, world!";
@ -41,4 +41,4 @@ pub fn main() -> i32 {
println!("pipetest passed!");
0
}
}
}

@ -5,8 +5,8 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{exit, thread_create, waittid, get_time};
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
@ -17,7 +17,9 @@ unsafe fn f() -> ! {
for _ in 0..PER_THREAD {
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 { t = t * t % 10007; }
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
}
exit(t as i32)
@ -26,7 +28,7 @@ unsafe fn f() -> ! {
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}

@ -5,16 +5,15 @@
extern crate user_lib;
extern crate alloc;
use alloc::vec::Vec;
use crate::alloc::string::ToString;
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
const THREAD_COUNT: usize = 16;
unsafe fn f(count:usize) -> ! {
unsafe fn f(count: usize) -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
let a = &mut A as *mut usize;

@ -5,9 +5,9 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{exit, thread_create, waittid, get_time, yield_};
use alloc::vec::Vec;
use core::sync::atomic::{AtomicBool, Ordering};
use user_lib::{exit, get_time, thread_create, waittid, yield_};
static mut A: usize = 0;
static OCCUPIED: AtomicBool = AtomicBool::new(false);
@ -17,12 +17,17 @@ const THREAD_COUNT: usize = 16;
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
while OCCUPIED.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed).is_err() {
while OCCUPIED
.compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
.is_err()
{
yield_();
}
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 { t = t * t % 10007; }
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
OCCUPIED.store(false, Ordering::Relaxed);
}
@ -32,7 +37,7 @@ unsafe fn f() -> ! {
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}

@ -5,8 +5,8 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{exit, thread_create, waittid, get_time, yield_};
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid, yield_};
static mut A: usize = 0;
static mut OCCUPIED: bool = false;
@ -16,12 +16,16 @@ const THREAD_COUNT: usize = 16;
unsafe fn f() -> ! {
let mut t = 2usize;
for _ in 0..PER_THREAD {
while OCCUPIED { yield_(); }
while OCCUPIED {
yield_();
}
OCCUPIED = true;
// enter critical section
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 { t = t * t % 10007; }
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
// exit critical section
OCCUPIED = false;
@ -33,7 +37,7 @@ unsafe fn f() -> ! {
#[no_mangle]
pub fn main() -> i32 {
let start = get_time();
let mut v = Vec::new();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}

@ -5,9 +5,9 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{exit, thread_create, waittid, get_time};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
use user_lib::{mutex_blocking_create, mutex_lock, mutex_unlock};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
@ -19,7 +19,9 @@ unsafe fn f() -> ! {
mutex_lock(0);
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 { t = t * t % 10007; }
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
mutex_unlock(0);
}
@ -30,7 +32,7 @@ unsafe fn f() -> ! {
pub fn main() -> i32 {
let start = get_time();
assert_eq!(mutex_blocking_create(), 0);
let mut v = Vec::new();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}

@ -5,9 +5,9 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{exit, thread_create, waittid, get_time};
use user_lib::{mutex_create, mutex_lock, mutex_unlock};
use alloc::vec::Vec;
use user_lib::{exit, get_time, thread_create, waittid};
use user_lib::{mutex_create, mutex_lock, mutex_unlock};
static mut A: usize = 0;
const PER_THREAD: usize = 1000;
@ -19,7 +19,9 @@ unsafe fn f() -> ! {
mutex_lock(0);
let a = &mut A as *mut usize;
let cur = a.read_volatile();
for _ in 0..500 { t = t * t % 10007; }
for _ in 0..500 {
t = t * t % 10007;
}
a.write_volatile(cur + 1);
mutex_unlock(0);
}
@ -30,7 +32,7 @@ unsafe fn f() -> ! {
pub fn main() -> i32 {
let start = get_time();
assert_eq!(mutex_create(), 0);
let mut v = Vec::new();
let mut v = Vec::new();
for _ in 0..THREAD_COUNT {
v.push(thread_create(f as usize, 0) as usize);
}

@ -4,7 +4,7 @@
#[macro_use]
extern crate user_lib;
use user_lib::{fork, exec, wait};
use user_lib::{exec, fork, wait};
#[no_mangle]
pub fn main() -> i32 {

@ -4,10 +4,10 @@
#[macro_use]
extern crate user_lib;
use user_lib::{sleep, exit, get_time, fork, waitpid};
use user_lib::{exit, fork, get_time, sleep, waitpid};
fn sleepy() {
let time: usize = 1000;
let time: usize = 100;
for i in 0..5 {
sleep(time);
println!("sleep {} x {} msecs.", i + 1, time);
@ -27,4 +27,4 @@ pub fn main() -> i32 {
println!("use {} msecs.", get_time() - current_time);
println!("sleep pass.");
0
}
}

@ -13,7 +13,11 @@ pub fn main() -> i32 {
println!("current time_msec = {}", start);
sleep(100);
let end = get_time();
println!("time_msec = {} after sleeping 100 ticks, delta = {}ms!", end, end - start);
println!(
"time_msec = {} after sleeping 100 ticks, delta = {}ms!",
end,
end - start
);
println!("r_sleep passed!");
0
}
}

@ -5,7 +5,7 @@
extern crate user_lib;
fn f(d: usize) {
println!("d = {}",d);
println!("d = {}", d);
f(d + 1);
}
@ -14,4 +14,4 @@ pub fn main() -> i32 {
println!("It should trigger segmentation fault!");
f(0);
0
}
}

@ -6,14 +6,13 @@ extern crate user_lib;
extern crate alloc;
use user_lib::{semaphore_create, semaphore_up, semaphore_down};
use user_lib::{thread_create, waittid, sleep};
use user_lib::exit;
use alloc::vec;
use user_lib::exit;
use user_lib::{semaphore_create, semaphore_down, semaphore_up};
use user_lib::{sleep, thread_create, waittid};
const SEM_SYNC: usize = 0;
unsafe fn first() -> ! {
sleep(10);
println!("First work and wakeup Second");

@ -6,10 +6,12 @@ extern crate user_lib;
extern crate alloc;
use user_lib::{condvar_create, condvar_signal, condvar_wait, mutex_blocking_create, mutex_lock, mutex_unlock};
use user_lib::{thread_create, waittid, sleep};
use user_lib::exit;
use alloc::vec;
use user_lib::exit;
use user_lib::{
condvar_create, condvar_signal, condvar_wait, mutex_blocking_create, mutex_lock, mutex_unlock,
};
use user_lib::{sleep, thread_create, waittid};
static mut A: usize = 0;
@ -20,7 +22,7 @@ unsafe fn first() -> ! {
sleep(10);
println!("First work, Change A --> 1 and wakeup Second");
mutex_lock(MUTEX_ID);
A=1;
A = 1;
condvar_signal(CONDVAR_ID);
mutex_unlock(MUTEX_ID);
exit(0)
@ -29,7 +31,7 @@ unsafe fn first() -> ! {
unsafe fn second() -> ! {
println!("Second want to continue,but need to wait A=1");
mutex_lock(MUTEX_ID);
while A==0 {
while A == 0 {
println!("Second: A is {}", A);
condvar_wait(CONDVAR_ID, MUTEX_ID);
}

@ -5,21 +5,27 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{thread_create, waittid, exit};
use alloc::vec;
use user_lib::{exit, thread_create, waittid};
pub fn thread_a() -> ! {
for _ in 0..1000 { print!("a"); }
for _ in 0..1000 {
print!("a");
}
exit(1)
}
pub fn thread_b() -> ! {
for _ in 0..1000 { print!("b"); }
exit(2)
for _ in 0..1000 {
print!("b");
}
exit(2)
}
pub fn thread_c() -> ! {
for _ in 0..1000 { print!("c"); }
for _ in 0..1000 {
print!("c");
}
exit(3)
}

@ -5,8 +5,8 @@
extern crate user_lib;
extern crate alloc;
use user_lib::{thread_create, waittid, exit};
use alloc::vec::Vec;
use user_lib::{exit, thread_create, waittid};
struct Argument {
pub ch: char,
@ -15,7 +15,9 @@ struct Argument {
fn thread_print(arg: *const Argument) -> ! {
let arg = unsafe { &*arg };
for _ in 0..1000 { print!("{}", arg.ch); }
for _ in 0..1000 {
print!("{}", arg.ch);
}
exit(arg.rc)
}
@ -23,12 +25,15 @@ fn thread_print(arg: *const Argument) -> ! {
pub fn main() -> i32 {
let mut v = Vec::new();
let args = [
Argument { ch: 'a', rc: 1, },
Argument { ch: 'b', rc: 2, },
Argument { ch: 'c', rc: 3, },
];
Argument { ch: 'a', rc: 1 },
Argument { ch: 'b', rc: 2 },
Argument { ch: 'c', rc: 3 },
];
for arg in args.iter() {
v.push(thread_create(thread_print as usize, arg as *const _ as usize));
v.push(thread_create(
thread_print as usize,
arg as *const _ as usize,
));
}
for tid in v.iter() {
let exit_code = waittid(*tid as usize);

@ -1,4 +1,3 @@
#![no_std]
#![no_main]
#![allow(clippy::println_empty_string)]
@ -213,4 +212,3 @@ pub fn main() -> i32 {
}
}
}

@ -32,7 +32,10 @@ pub fn main() -> i32 {
let mut exit_code: i32 = Default::default();
let wait_pid = waitpid(pid as usize, &mut exit_code);
assert_eq!(pid, wait_pid);
println!("\x1b[32mUsertests: Test {} in Process {} exited with code {}\x1b[0m", test, pid, exit_code);
println!(
"\x1b[32mUsertests: Test {} in Process {} exited with code {}\x1b[0m",
test, pid, exit_code
);
}
}
println!("Usertests passed!");

@ -14,4 +14,4 @@ pub fn main() -> i32 {
}
println!("yield pass.");
0
}
}

@ -4,9 +4,14 @@ use super::exit;
fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
let err = panic_info.message().unwrap();
if let Some(location) = panic_info.location() {
println!("Panicked at {}:{}, {}", location.file(), location.line(), err);
println!(
"Panicked at {}:{}, {}",
location.file(),
location.line(),
err
);
} else {
println!("Panicked: {}", err);
}
exit(-1);
}
}

@ -58,7 +58,10 @@ pub fn sys_pipe(pipe: &mut [usize]) -> isize {
}
pub fn sys_read(fd: usize, buffer: &mut [u8]) -> isize {
syscall(SYSCALL_READ, [fd, buffer.as_mut_ptr() as usize, buffer.len()])
syscall(
SYSCALL_READ,
[fd, buffer.as_mut_ptr() as usize, buffer.len()],
)
}
pub fn sys_write(fd: usize, buffer: &[u8]) -> isize {
@ -91,7 +94,10 @@ pub fn sys_fork() -> isize {
}
pub fn sys_exec(path: &str, args: &[*const u8]) -> isize {
syscall(SYSCALL_EXEC, [path.as_ptr() as usize, args.as_ptr() as usize, 0])
syscall(
SYSCALL_EXEC,
[path.as_ptr() as usize, args.as_ptr() as usize, 0],
)
}
pub fn sys_waitpid(pid: isize, exit_code: *mut i32) -> isize {
@ -142,6 +148,6 @@ pub fn sys_condvar_signal(condvar_id: usize) -> isize {
syscall(SYSCALL_CONDVAR_SIGNAL, [condvar_id, 0, 0])
}
pub fn sys_condvar_wait(condvar_id: usize, mutex_id:usize) -> isize {
pub fn sys_condvar_wait(condvar_id: usize, mutex_id: usize) -> isize {
syscall(SYSCALL_CONDVAR_WAIT, [condvar_id, mutex_id, 0])
}
