Merge branch 'master' into arch-aarch64

master
equation314 6 years ago
commit aa5bd3041b

@ -8,10 +8,12 @@ Going to be the next generation teaching operating system.
Supported architectures: x86_64, RISCV32IMA(S/M), AArch64
Tested boards: QEMU, Raspberry Pi 3B+
Tested boards: QEMU, labeled-RISCV, Raspberry Pi 3B+
[Dev docs](https://rucore.gitbook.io/rust-os-docs/) (in Chinese)
![demo](./docs/2_OSLab/os2atc/demo.png)
## Summary
This is a project of THU courses:

@ -1,416 +0,0 @@
//! memory set, area
//! and the inactive page table
use alloc::vec::Vec;
use core::fmt::{Debug, Error, Formatter};
use super::*;
use crate::paging::*;
/// an inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
/// Why is this trait in this file? (It seems it should be in paging/mod.rs.)
pub trait InactivePageTable {
/// the active version of page table
type Active: PageTable;
/*
** @brief create an inactive page table with kernel memory mapped
** @retval InactivePageTable the created inactive page table
*/
fn new() -> Self;
/*
** @brief create an inactive page table without kernel memory mapped
** @retval InactivePageTable the created inactive page table
*/
fn new_bare() -> Self;
/*
** @brief temporarily activate the page table and edit it
** @param f: impl FnOnce(&mut Self::Active)
** the editing action, which takes the temporarily
** activated page table as its parameter
** @retval none
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active));
/*
** @brief activate the inactive page table
** @retval none
*/
unsafe fn activate(&self);
/*
** @brief execute a function with this inactive page table activated
** @param f: impl FnOnce() -> T the function to be executed
** @retval T the return value of f
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T;
/*
** @brief get the token of the inactive page table
** @retval usize the token of the inactive page table
*/
fn token(&self) -> usize;
/// Why are the methods below in this trait?
/*
** @brief allocate a frame for use
** @retval Option<PhysAddr> the physical address of the beginning of the allocated frame, if any
*/
fn alloc_frame() -> Option<PhysAddr>;
/*
** @brief deallocate a frame
** @param target: PhysAddr the physical address of the beginning of the frame to be deallocated
** @retval none
*/
fn dealloc_frame(target: PhysAddr);
}
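// Hypothetical usage sketch (illustration only, not part of the original file):
// create a page table, edit it while inactive, then switch to it.
#[allow(dead_code)]
fn inactive_page_table_example<T: InactivePageTable>(pt: &mut T) {
    pt.edit(|active| {
        // map or unmap pages through the temporarily activated table
        let _ = active;
    });
    unsafe { pt.activate() } // switch the CPU to this page table
}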
/// a continuous memory space with the same attributes
/// like `vma_struct` in ucore
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
phys_start_addr: Option<PhysAddr>,
flags: MemoryAttr,
name: &'static str,
}
impl MemoryArea {
/*
** @brief create a memory area from virtual address
** @param start_addr: VirtAddr the virtual address of the beginning of the area
** @param end_addr: VirtAddr the virtual address of the end of the area
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: None, flags, name }
}
/*
** @brief create an identically mapped memory area from virtual addresses
** @param start_addr: VirtAddr the virtual address of the beginning of the area
** @param end_addr: VirtAddr the virtual address of the end of the area
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: Some(start_addr), flags, name }
}
/*
** @brief create a memory area from physical addresses
** @param phys_start_addr: PhysAddr the physical address of the beginning of the area
** @param phys_end_addr: PhysAddr the physical address of the end of the area
** @param offset: usize the offset between physical and virtual addresses
** @param flags: MemoryAttr the common memory attribute of the memory area
** @param name: &'static str the name of the memory area
** @retval MemoryArea the memory area created
*/
pub fn new_physical(phys_start_addr: PhysAddr, phys_end_addr: PhysAddr, offset: usize, flags: MemoryAttr, name: &'static str) -> Self {
let start_addr = phys_start_addr + offset;
let end_addr = phys_end_addr + offset;
assert!(start_addr <= end_addr, "invalid memory area");
let phys_start_addr = Some(phys_start_addr);
MemoryArea { start_addr, end_addr, phys_start_addr, flags, name }
}
/*
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
use core::slice;
slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
use core::slice;
slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/*
** @brief test whether a virtual address is in the memory area
** @param addr: VirtAddr the virtual address to test
** @retval bool whether the virtual address is in the memory area
*/
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
/*
** @brief test whether the memory area overlaps with another memory area
** @param other: &MemoryArea another memory area to test
** @retval bool whether the two memory areas overlap
*/
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
let p0 = Page::of_addr(self.start_addr);
let p1 = Page::of_addr(self.end_addr - 1) + 1;
let p2 = Page::of_addr(other.start_addr);
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/*
** @brief map the memory area to physical addresses in a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn map<T: InactivePageTable>(&self, pt: &mut T::Active) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = page.start_address() - self.start_addr + phys_start;
self.flags.apply(pt.map(addr, target));
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = T::alloc_frame().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
// for delayed frame allocation
// let entry = pt.map(addr,0);
// self.flags.apply(entry);
// let entry = pt.get_entry(addr).expect("fail to get entry");
// entry.set_present(false);
// entry.update();
}
}
};
}
/*
** @brief unmap the memory area from a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn unmap<T: InactivePageTable>(&self, pt: &mut T::Active) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
if self.phys_start_addr.is_none() {
if pt.get_entry(addr).expect("failed to get entry").present() {
let target = pt.get_entry(addr).expect("failed to get entry").target();
T::dealloc_frame(target);
} else {
// mark the entry present so that pt.unmap() can proceed
pt.get_entry(addr).expect("failed to get entry").set_present(true);
}
}
pt.unmap(addr);
}
}
pub fn get_start_addr(&self) -> VirtAddr {
self.start_addr
}
pub fn get_end_addr(&self) -> VirtAddr {
self.end_addr
}
pub fn get_flags(&self) -> &MemoryAttr {
&self.flags
}
}
/// The attributes of the memory
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
user: bool,
readonly: bool,
execute: bool,
hide: bool,
mmio: u8,
}
impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self {
self.user = true;
self
}
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/*
** @brief set the MMIO type
** @retval MemoryAttr the memory attribute itself
*/
pub fn mmio(mut self, value: u8) -> Self {
self.mmio = value;
self
}
/*
** @brief set the memory attribute's hide bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn hide(mut self) -> Self {
self.hide = true;
self
}
/*
** @brief apply the memory attribute to a page table entry
** @param entry: &mut impl Entry
** the page table entry to apply the attribute
** @retval none
*/
fn apply(&self, entry: &mut impl Entry) {
if self.user { entry.set_user(true); }
if self.readonly { entry.set_writable(false); }
if self.execute { entry.set_execute(true); }
if self.mmio != 0 { entry.set_mmio(self.mmio); }
if self.hide { entry.set_present(false); }
if self.user || self.readonly || self.execute || self.mmio != 0 || self.hide { entry.update(); }
}
}
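// Hypothetical usage sketch (illustration only): MemoryAttr is a builder,
// so the attribute setters above chain before an area is created.
#[allow(dead_code)]
fn memory_attr_example() -> MemoryAttr {
    // a user-accessible, read-only, executable attribute set
    MemoryAttr::default().user().readonly().execute()
}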
/// a memory space consisting of multiple memory areas, with an associated page table and stack space
/// like `mm_struct` in ucore
pub struct MemorySet<T: InactivePageTable> {
areas: Vec<MemoryArea>,
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/*
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
pub fn new() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
page_table: T::new(),
}
}
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
page_table: T::new_bare(),
}
}
/*
** @brief find the memory area from virtual address
** @param addr: VirtAddr the virtual address to find
** @retval Option<&MemoryArea> the memory area containing the virtual address, if present
*/
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
self.areas.iter().find(|area| area.contains(addr))
}
/*
** @brief add the memory area to the memory set
** @param area: MemoryArea the memory area to add
** @retval none
*/
pub fn push(&mut self, area: MemoryArea) {
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
self.page_table.edit(|pt| area.map::<T>(pt));
self.areas.push(area);
}
/*
** @brief get iterator of the memory area
** @retval impl Iterator<Item=&MemoryArea>
** the memory area iterator
*/
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/*
** @brief execute function with the associated page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/*
** @brief activate the associated page table
** @retval none
*/
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/*
** @brief get the token of the associated page table
** @retval usize the token of the inactive page table
*/
pub fn token(&self) -> usize {
self.page_table.token()
}
/*
** @brief clear the memory set
** @retval none
*/
pub fn clear(&mut self) {
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap::<T>(pt);
}
});
areas.clear();
}
/*
** @brief get the mutable reference for the inactive page table
** @retval: &mut T the mutable reference of the inactive page table
*/
pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {
fn clone(&self) -> Self {
let mut page_table = T::new();
page_table.edit(|pt| {
for area in self.areas.iter() {
area.map::<T>(pt);
}
});
info!("finish map in clone!");
MemorySet {
areas: self.areas.clone(),
page_table,
}
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
fn drop(&mut self) {
info!("come into drop func for memoryset");
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list()
.entries(self.areas.iter())
.finish()
}
}

@ -0,0 +1,34 @@
use super::*;
#[derive(Debug, Clone)]
pub struct ByFrame<T: FrameAllocator> {
flags: MemoryAttr,
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = self.allocator.alloc().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = pt.get_entry(addr).expect("fail to get entry").target();
self.allocator.dealloc(target);
pt.unmap(addr);
}
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl<T: FrameAllocator> ByFrame<T> {
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
ByFrame { flags, allocator }
}
}

@ -0,0 +1,45 @@
use super::*;
#[derive(Debug, Clone)]
pub struct Delay<T: FrameAllocator> {
flags: MemoryAttr,
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.map(addr, 0);
entry.set_present(false);
entry.update();
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
self.allocator.dealloc(entry.target());
pt.unmap(addr);
}
}
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// not a delay case
return false;
}
let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame);
self.flags.apply(entry);
true
}
}
impl<T: FrameAllocator> Delay<T> {
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
Delay { flags, allocator }
}
}
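// Hypothetical flow sketch (illustration only). With `Delay`, frame
// allocation is deferred until the first access:
// 1. `map()` installs a non-present entry with target 0.
// 2. The first CPU access traps into the kernel's page fault handler.
// 3. `page_fault_handler()` allocates a frame, sets it as the target,
//    applies the flags, and returns `true` so the access is retried.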

@ -0,0 +1,32 @@
use super::*;
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Linear {
offset: isize,
flags: MemoryAttr,
}
impl MemoryHandler for Linear {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = (addr as isize + self.offset) as PhysAddr;
self.flags.apply(pt.map(addr, target));
}
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
pt.unmap(addr);
}
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl Linear {
pub fn new(offset: isize, flags: MemoryAttr) -> Self {
Linear { offset, flags }
}
}

@ -0,0 +1,29 @@
use super::*;
// this may be an interesting part for a lab
pub trait MemoryHandler: Debug + 'static {
fn box_clone(&self) -> Box<MemoryHandler>;
fn map(&self, pt: &mut PageTable, addr: VirtAddr);
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
}
impl Clone for Box<MemoryHandler> {
fn clone(&self) -> Box<MemoryHandler> {
self.box_clone()
}
}
pub trait FrameAllocator: Debug + Clone + 'static {
fn alloc(&self) -> Option<PhysAddr>;
fn dealloc(&self, target: PhysAddr);
}
mod linear;
mod byframe;
mod delay;
//mod swap;
pub use self::linear::Linear;
pub use self::byframe::ByFrame;
pub use self::delay::Delay;
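// Hypothetical usage sketch (illustration only): each area picks the handler
// matching its mapping policy. `FrameAlloc` is a placeholder allocator type.
//
// ms.push(stext, etext, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
// ms.push(heap_start, heap_end, ByFrame::new(MemoryAttr::default(), FrameAlloc), "heap");
// ms.push(mmap_start, mmap_end, Delay::new(MemoryAttr::default(), FrameAlloc), "mmap");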

@ -0,0 +1,274 @@
//! memory set, area
//! and the inactive page table
use alloc::{vec::Vec, boxed::Box};
use core::fmt::{Debug, Error, Formatter};
use super::*;
use crate::paging::*;
use self::handler::MemoryHandler;
pub mod handler;
/// a continuous memory space with the same attributes
/// like `vma_struct` in ucore
#[derive(Debug, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
handler: Box<MemoryHandler>,
name: &'static str,
}
impl MemoryArea {
/*
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
::core::slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
::core::slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/*
** @brief test whether a virtual address is in the memory area
** @param addr: VirtAddr the virtual address to test
** @retval bool whether the virtual address is in the memory area
*/
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
/*
** @brief test whether the memory area overlaps with another memory area
** @param other: &MemoryArea another memory area to test
** @retval bool whether the two memory areas overlap
*/
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
let p0 = Page::of_addr(self.start_addr);
let p1 = Page::of_addr(self.end_addr - 1) + 1;
let p2 = Page::of_addr(other.start_addr);
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/*
** @brief map the memory area to physical addresses in a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.map(pt, page.start_address());
}
}
/*
** @brief unmap the memory area from a page table
** @param pt: &mut T::Active the page table to use
** @retval none
*/
fn unmap(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.unmap(pt, page.start_address());
}
}
}
/// The attributes of the memory
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
user: bool,
readonly: bool,
execute: bool,
mmio: u8,
}
impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self {
self.user = true;
self
}
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/*
** @brief set the MMIO type
** @retval MemoryAttr the memory attribute itself
*/
pub fn mmio(mut self, value: u8) -> Self {
self.mmio = value;
self
}
/*
** @brief apply the memory attribute to a page table entry
** @param entry: &mut impl Entry
** the page table entry to apply the attribute
** @retval none
*/
pub fn apply(&self, entry: &mut Entry) {
entry.set_present(true);
entry.set_user(self.user);
entry.set_writable(!self.readonly);
entry.set_execute(self.execute);
entry.set_mmio(self.mmio);
entry.update();
}
}
/// a memory space consisting of multiple memory areas, with an associated page table and stack space
/// like `mm_struct` in ucore
pub struct MemorySet<T: InactivePageTable> {
areas: Vec<MemoryArea>,
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/*
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
pub fn new() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new(),
}
}
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new_bare(),
}
}
/*
** @brief find the memory area from virtual address
** @param addr: VirtAddr the virtual address to find
** @retval Option<&MemoryArea> the memory area containing the virtual address, if present
*/
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
self.areas.iter().find(|area| area.contains(addr))
}
/*
** @brief build a memory area and add it to the memory set
** @param start_addr: VirtAddr the virtual address of the beginning of the area
** @param end_addr: VirtAddr the virtual address of the end of the area
** @param handler: impl MemoryHandler the handler that maps and unmaps the area
** @param name: &'static str the name of the memory area
** @retval none
*/
pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, handler: impl MemoryHandler, name: &'static str) {
assert!(start_addr <= end_addr, "invalid memory area");
let area = MemoryArea { start_addr, end_addr, handler: Box::new(handler), name };
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
self.page_table.edit(|pt| area.map(pt));
self.areas.push(area);
}
/*
** @brief get iterator of the memory area
** @retval impl Iterator<Item=&MemoryArea>
** the memory area iterator
*/
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/*
** @brief execute function with the associated page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/*
** @brief activate the associated page table
** @retval none
*/
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/*
** @brief get the token of the associated page table
** @retval usize the token of the inactive page table
*/
pub fn token(&self) -> usize {
self.page_table.token()
}
/*
** @brief clear the memory set
** @retval none
*/
pub fn clear(&mut self) {
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap(pt);
}
});
areas.clear();
}
/*
** @brief get the mutable reference for the inactive page table
** @retval: &mut T the mutable reference of the inactive page table
*/
pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table
}
pub fn page_fault_handler(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr));
match area {
Some(area) => self.page_table.edit(|pt| area.handler.page_fault_handler(pt, addr)),
None => false,
}
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {
fn clone(&self) -> Self {
let mut page_table = T::new();
page_table.edit(|pt| {
for area in self.areas.iter() {
area.map(pt);
}
});
MemorySet {
areas: self.areas.clone(),
page_table,
}
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list()
.entries(self.areas.iter())
.finish()
}
}

@ -0,0 +1,15 @@
//! Helper functions
use super::*;
pub trait PageTableExt: PageTable {
const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
fn with_temporary_map<T, D>(&mut self, target: PhysAddr, f: impl FnOnce(&mut Self, &mut D) -> T) -> T {
self.map(Self::TEMP_PAGE_ADDR, target);
let data = unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
let ret = f(self, data);
self.unmap(Self::TEMP_PAGE_ADDR);
ret
}
}
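// Hypothetical usage sketch (illustration only): zero a physical frame by
// temporarily mapping it at TEMP_PAGE_ADDR.
//
// pt.with_temporary_map(frame_paddr, |_pt, data: &mut [u8; 4096]| {
//     for byte in data.iter_mut() { *byte = 0; }
// });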

@ -57,14 +57,16 @@ impl Entry for MockEntry {
fn set_user(&mut self, value: bool) { unimplemented!() }
fn execute(&self) -> bool { unimplemented!() }
fn set_execute(&mut self, value: bool) { unimplemented!() }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
impl PageTable for MockPageTable {
type Entry = MockEntry;
// type Entry = MockEntry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry {
let entry = &mut self.entries[addr / PAGE_SIZE];
assert!(!entry.present);
entry.present = true;
@ -77,7 +79,7 @@ impl PageTable for MockPageTable {
assert!(entry.present);
entry.present = false;
}
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry> {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry> {
Some(&mut self.entries[addr / PAGE_SIZE])
}
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {

@ -3,208 +3,149 @@
//! Implemented for every architecture, used by OS.
use super::*;
use super::memory_set::InactivePageTable;
use log::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
pub use self::ext::*;
#[cfg(test)]
mod mock_page_table;
mod ext;
// trait for PageTable
pub trait PageTable {
type Entry: Entry;
/*
** @brief map a virtual address to the target physical address
** @param addr: VirtAddr the virtual address to map
** @param target: PhysAddr the target physical address
** @retval Entry the page table entry of the mapped virtual address
*/
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
/*
** @brief unmap a virtual address from its physical address
** @param addr: VirtAddr the virtual address to unmap
** @retval none
*/
// type Entry: Entry;
/// Map a page of virtual address `addr` to the frame of physical address `target`
/// Return the page table entry of the mapped virtual address
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry;
/// Unmap a page of virtual address `addr`
fn unmap(&mut self, addr: VirtAddr);
/*
** @brief get the page table entry of a virtual address
** @param addr: VirtAddr the virtual address
** @retval Entry the page table entry of the virtual address
*/
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry>;
// For testing with mock
/*
** @brief used for testing with mock
** get a mutable reference of the content of a page from a virtual address
** @param addr: VirtAddr the virtual address of the page
** @retval &'b mut [u8] mutable reference of the content of a page as array of bytes
*/
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8];
/*
** @brief used for testing with mock
** read data from a virtual address
** @param addr: VirtAddr the virtual address of the data to read
** @retval u8 the data read
*/
fn read(&mut self, addr: VirtAddr) -> u8;
/*
** @brief used for testing with mock
** write data to a virtual address
** @param addr: VirtAddr the virtual address of the data to write
** @param data: u8 the data to write
** @retval none
*/
fn write(&mut self, addr: VirtAddr, data: u8);
}
/// Get the page table entry of the page at virtual address `addr`
/// If the page does not exist, return `None`
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
/// Get a mutable reference to the content of the page at virtual address `addr`
/// Used for testing with mock
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/// Read data from virtual address `addr`
/// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() }
}
// trait for Entry in PageTable
/// Write data to virtual address `addr`
/// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) {
unsafe { (addr as *mut u8).write(data) }
}
}
/// Page Table Entry
pub trait Entry {
/*
** @brief force update this page table entry
** IMPORTANT!
** This must be called after any change to ensure it becomes effective.
** Usually this will flush the TLB/MMU.
** @retval none
*/
/// Make all changes take effect.
///
/// IMPORTANT!
/// This must be called after any change to ensure it becomes effective.
/// Usually it will cause a TLB/MMU flush.
fn update(&mut self);
/*
** @brief get the accessed bit of the entry
** Will be set when accessed
** @retval bool the accessed bit
*/
/// A bit set by hardware when the page is accessed
fn accessed(&self) -> bool;
/*
** @brief get the dirty bit of the entry
** Will be set when written
** @retval bool the dirty bit
*/
/// A bit set by hardware when the page is written
fn dirty(&self) -> bool;
/*
** @brief get the writable bit of the entry
** Will page fault when trying to write a page where writable=0
** @retval bool the writable bit
*/
/// Will page fault when trying to write a page where writable=0
fn writable(&self) -> bool;
/*
** @brief get the present bit of the entry
** Will page fault when trying to access a page where present=0
** @retval bool the present bit
*/
/// Will page fault when trying to access a page where present=0
fn present(&self) -> bool;
/*
** @brief clear the accessed bit
** @retval none
*/
fn clear_accessed(&mut self);
/*
** @brief clear the dirty bit
** @retval none
*/
fn clear_dirty(&mut self);
/*
** @brief set value of writable bit
** @param value: bool the writable bit value
** @retval none
*/
fn set_writable(&mut self, value: bool);
/*
** @brief set value of present bit
** @param value: bool the present bit value
** @retval none
*/
fn set_present(&mut self, value: bool);
/*
** @brief get the target physical address in the entry
** can be used for other purposes if present=0
** @retval target: PhysAddr the target physical address
*/
/// The target physical address in the entry
/// Can be used for other purposes if present=0
fn target(&self) -> PhysAddr;
/*
** @brief set the target physical address in the entry
** @param target: PhysAddr the target physical address
** @retval none
*/
fn set_target(&mut self, target: PhysAddr);
// For Copy-on-write extension
/*
** @brief used for Copy-on-write extension
** get the writable and shared bit
** @retval value: bool the writable and shared bit
*/
// For Copy-on-write
fn writable_shared(&self) -> bool;
/*
** @brief used for Copy-on-write extension
** get the readonly and shared bit
** @retval value: bool the readonly and shared bit
*/
fn readonly_shared(&self) -> bool;
/*
** @brief used for Copy-on-write extension
** mark the page as (writable or readonly) shared
** @param writable: bool if it is true, set the page as writable and shared
** else set the page as readonly and shared
** @retval value: none
*/
fn set_shared(&mut self, writable: bool);
/*
** @brief used for Copy-on-write extension
** mark the page as not shared
** @retval value: none
*/
fn clear_shared(&mut self);
// For Swap extension
/*
** @brief used for Swap extension
** get the swapped bit
** @retval value: bool the swapped bit
*/
// For Swap
fn swapped(&self) -> bool;
/*
** @brief used for Swap extension
** set the swapped bit
** @param value: bool the swapped bit value
** @retval none
*/
fn set_swapped(&mut self, value: bool);
/*
** @brief get the user bit of the entry
** @retval bool the user bit
*/
fn user(&self) -> bool;
/*
** @brief set value of user bit
** @param value: bool the user bit value
** @retval none
*/
fn set_user(&mut self, value: bool);
/*
** @brief get the execute bit of the entry
** @retval bool the execute bit
*/
fn execute(&self) -> bool;
/*
** @brief set value of execute bit
** @param value: bool the execute bit value
** @retval none
*/
fn set_execute(&mut self, value: bool);
/*
** @brief get MMIO type
** (e.g. aarch64 can have normal/device/normal_non_cacheable memory)
** @retval u8 the MMIO type
*/
fn mmio(&self) -> u8;
/*
** @brief set MMIO type
** @param value: u8 the MMIO type
** @retval none
*/
fn set_mmio(&mut self, value: u8);
}
/// An inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
/// the active version of page table
type Active: PageTable;
/// Create a new page table with kernel memory mapped
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/// Create a new page table without kernel memory mapped
fn new_bare() -> Self;
/// Map kernel segments
fn map_kernel(&mut self);
/// CR3 on x86, SATP on RISCV, TTBR on AArch64
fn token(&self) -> usize;
unsafe fn set_token(token: usize);
fn active_token() -> usize;
fn flush_tlb();
/// Make this page table editable
/// Set the recursive entry of the current active page table to this
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/// Activate this page table
unsafe fn activate(&self) {
let old_token = Self::active_token();
let new_token = self.token();
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
}
}
/// Execute function `f` with this page table activated
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_token = Self::active_token();
let new_token = self.token();
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
}
let ret = f();
debug!("switch table {:x?} -> {:x?}", new_token, old_token);
if old_token != new_token {
Self::set_token(old_token);
Self::flush_tlb();
}
ret
}
}
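// Hypothetical usage sketch (illustration only): the default `activate`/`with`
// implementations above need only the token/TLB primitives from each arch.
//
// let mut pt = InactivePageTable0::new();   // kernel mapped via map_kernel()
// pt.edit(|active| { let _ = active.get_entry(0x1000); });
// unsafe { pt.with(|| { /* run code under this address space */ }) };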

@ -8,7 +8,6 @@
use super::*;
use super::paging::*;
use super::memory_set::InactivePageTable;
use super::addr::Frame;
use core::ops::{Deref, DerefMut};

Binary file not shown (image, 316 KiB).

@ -0,0 +1,616 @@
<!-- page_number: true -->
<!-- $width:12in -->
<!-- $height: 6.75in -->
# Design and Implementation of uCore OS in Rust
王润基
Department of Computer Science and Technology, Tsinghua University
2018.12.16 @ OS2ATC
---
# Outline
## Introduction: what is Rust uCore OS?
## Motivation: why write an OS in Rust?
## Experience: what does Rust buy you when writing an OS?
## Future: what comes next?
---
# Introduction: What Is Rust uCore OS?
---
# uCore OS
Tsinghua University's teaching operating system
Modeled on xv6 & JOS from MIT, OS161 from Harvard, and Linux
A monolithic-kernel OS written in C
* [ucore_os_lab](https://github.com/chyyuu/ucore_os_lab): labs for the OS course
* [ucore_os_plus](https://github.com/chyyuu/ucore_os_plus): a system for teaching and research
---
# uCore OS in Rust -- RustOS
A capstone project of the 2018 OS course:
"Re-implement uCore in Rust"
#
Later promoted in the OS special-topics training course. Currently it:
supports three platforms: x86_64, RISCV32, AArch64
supports real hardware: ICT's Labeled-RISCV, Raspberry Pi 3B
supports multi-core CPUs
---
## Capstone project topics
OS course:
* RustOS for x86_64 SMP
* Rustable: a Rust port of uCore to the ARM platform
* Rucore with LKM Drivers
OS special-topics training:
* Porting Raspberry Pi USB and VideoCore IV GPU drivers to RustOS
* Multi-core porting of RustOS and thread-level label management on the PARD framework
* Improving the RustOS wiki and building teaching lab exercises
* Multi-core implementation and optimization of RustOS, following sv6
* Porting RustOS to rv64, with LLVM compiler support
---
# Motivation: Why Write an OS in Rust?
* What are C's shortcomings?
* Which pain points does Rust solve?
* Is the time ripe?
---
# What Are C's Shortcomings?
---
## Simple? Or simplistic?
C is simple and direct, born for writing an OS.
But by the standards of modern programming languages, C is rather bare-bones: complex logic and abstractions are hard to express.
![90%](./C.jpg)
*The figure above is from [a Zhihu answer](https://www.zhihu.com/question/25038841/answer/44396770)*
---
### No language-level support for OOP and interfaces
C typically implements interfaces with function pointers:
```c
struct device {
size_t d_blocks;
size_t d_blocksize;
int (*d_open)(struct device *dev, uint32_t open_flags);
int (*d_close)(struct device *dev);
int (*d_io)(struct device *dev, struct iobuf *iob, bool write);
int (*d_ioctl)(struct device *dev, int op, void *data);
};
```
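For comparison, a minimal sketch of the same interface as a Rust trait (the names are illustrative, not the actual RustOS API); the compiler generates the vtable behind `dyn Device`:
```rust
pub trait Device {
    fn open(&mut self, open_flags: u32) -> Result<(), ()>;
    fn close(&mut self) -> Result<(), ()>;
    fn io(&mut self, buf: &mut [u8], write: bool) -> Result<usize, ()>;
    fn ioctl(&mut self, op: i32, data: usize) -> Result<(), ()>;
}
```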
---
### No support for basic data structures
The intrusive linked list commonly used in an OS (excerpted from ucore_os_lab):
```c
// list node
struct list_entry {
struct list_entry *prev, *next;
};
// embed a list node in the host type
struct Page {
list_entry_t page_link;
...
};
// convert a list node back to its host type
#define le2page(le, member) \
to_struct((le), struct Page, member)
#define offsetof(type, member) \
((size_t)(&((type *)0)->member))
#define to_struct(ptr, type, member) \
((type *)((char *)(ptr) - offsetof(type, member)))
```
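In Rust, a generic container usually replaces the intrusive list, and the host type is recovered without `offsetof` casts; a minimal sketch:
```rust
struct Page; // placeholder for page metadata

fn main() {
    let mut free_list: Vec<Page> = Vec::new();
    free_list.push(Page);
    let _page: Option<Page> = free_list.pop(); // typed access, no casts
}
```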
---
### No engineering-grade module system
* Complex build configuration
* Hard to reuse code
---
## Segmentation faults
Dangling pointers, double frees, data races...
#
![60%](./pointer.png)
---
# Which Pain Points Does Rust Solve?
* Strong typing, memory safety, thread safety: fewer bugs
* Modern language features: a better development experience
* A complete module system: easy code reuse
* Zero-cost abstractions: the fundamental guarantee that an OS can be written at all
---
# It Is Time to Try Rust
### Community: [Redox](https://www.redox-os.org)
A full-featured Rust OS with a microkernel architecture and GUI support
### Teaching: [CS140e](https://web.stanford.edu/class/cs140e/)
Stanford's experimental OS course, first offered in 2018
An OS written in Rust, targeting ARM, running on the Raspberry Pi 3
### Hobby: [Writing an OS in Rust](https://os.phil-opp.com)
A blog series that walks you through writing an OS in Rust
Targets x86_64, with extremely detailed tutorials
The author provides many open-source tools for writing an OS in Rust
---
## Everything is ready, except...
# What if you don't know Rust yet?
Writing an OS is an efficient way to learn Rust
---
# Experience: What Does Rust Buy You When Writing an OS?
* Memory and thread safety: fewer bugs
* Package management: reuse of existing code
* Interfaces and generics: a modular kernel
* Ownership and RAII: simplified resource management
---
# Safety!
Type system + ownership + lifetimes
=> memory safety + thread safety
---
## How Rust reduces bugs
### Out-of-bounds accesses eliminated
=> they become panics
### Invalid memory accesses due to lifetime bugs eliminated
=> they become compile errors
### Data races eliminated
=> at worst, deadlocks remain
### The search space for bugs narrowed
=> to unsafe blocks
---
## How Rust reduces bugs
* Most low-level mistakes are prevented at compile time
* A few logic errors surface at runtime
* Hard-to-find bugs are confined to a limited scope
---
## Making full use of existing wheels
External crates we depend on:
* `alloc`: containers
* `log`: logging
* `spin`: spinlocks
* `xmas-elf`: ELF file parsing
* `linked_list_allocator`: heap allocation algorithm
* `uart_16550`: serial port driver
* `x86_64`: wraps assembly instructions; encapsulates page tables and other data structures
This lets us focus on the core logic of the OS (see the sketch below).
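A sketch of how such crates surface in kernel code (illustrative, not the actual RustOS sources):
```rust
use log::info;   // logging macros from the `log` crate
use spin::Mutex; // spinlock from the `spin` crate

static TICKS: Mutex<usize> = Mutex::new(0);

fn on_timer() {
    let mut ticks = TICKS.lock();
    *ticks += 1;
    info!("{} ticks", *ticks);
}
```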
---
## Building our own wheels
Modeled on the `x86_64` crate and building on existing community work,
we implemented the `riscv` and `aarch64` crates.
---
# A Modular Kernel
ucore_os_lab = memory management + process management + file system
lab1-lab8 are layered on one another and highly coupled.
Yet they are logically unrelated and ought to be separated.
---
# A Modular Kernel
Each part lives in its own crate, independent of the others.
The kernel glues them together by implementing their interfaces.
Each can be unit-tested internally, then integration-tested together.
Combined with generics, this comes at zero cost.
---
### Memory module
Interface: page tables, page table entries, page fault handler
Function: per-process virtual memory management (`mm_struct`),
supporting swapping, copy-on-write, lazy allocation, and other mechanisms
### Thread module
Interface: context switching, construction of new threads
Function: thread scheduling and management
### File system
Interface: block devices, VFS (virtual file system)
Function: file operations and management
---
### Memory module: the interface
```rust
pub trait PageTable {
fn map(&mut self, addr: VirtAddr, target: PhysAddr)
-> &mut Entry;
fn unmap(&mut self, addr: VirtAddr);
fn get_entry(&mut self, addr: VirtAddr)
-> Option<&mut Entry>;
}
pub trait Entry {
fn update(&mut self); // flush TLB
fn present(&self) -> bool;
fn target(&self) -> PhysAddr;
fn set_present(&mut self, value: bool);
fn set_target(&mut self, target: PhysAddr);
...
}
```
---
### Memory module: upper layer written against the interface
```rust
pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
...
}
impl MemoryArea {
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let target = alloc_frame();
pt.map(addr, target);
}
}
}
```
Maps page table entries for a contiguous range of virtual addresses.
---
### Memory module: a mock implementation of the interface
```rust
pub struct MockPageTable {
entries: [MockEntry; PAGE_COUNT],
data: [u8; PAGE_SIZE * PAGE_COUNT],
page_fault_handler: Option<PageFaultHandler>,
}
impl PageTable for MockPageTable {
fn map(...) {...}
fn unmap(...) {...}
fn get_entry(...) {...}
}
impl MockPageTable {
fn read(&mut self, addr: VirtAddr) -> u8 {...}
fn write(&mut self, addr: VirtAddr, data: u8) {...}
}
```
Implements a simulated page table that mimics address translation, storing data in an array.
---
### Memory module: unit tests based on the mock
```rust
#[test]
fn memory_area_map() {
let mut pt = MockPageTable {...};
let area = MemoryArea {...};
area.map(&mut pt);
pt.write(0x1000, 1);
assert_eq!(pt.read(0x1000), 1);
}
```
Unit tests run with `cargo test` in any environment, with no dependency on QEMU.
---
### Thread module: interface and implementation
```rust
pub trait Context {
unsafe extern "C"
fn switch_to(&mut self, target: &mut Context);
}
pub struct X86Context {
rip: usize,
... // callee-saved registers
}
impl Context for X86Context {
unsafe extern "C" // Store caller-saved registers
fn switch_to(&mut self, target: &mut Context) {
// Store callee-saved registers
// Restore callee-saved registers
} // Restore caller-saved registers
}
```
Context switch: saving and restoring registers
---
### Thread module: upper layer written against the interface
```rust
/// Manages the state and scheduling of all threads; globally unique
pub struct ProcessManager {...}
/// Thread executor; one per CPU core
pub struct Processor {
manager: Arc<ProcessManager>,
context: Box<Context>,
...
}
impl Processor {
/// Schedule threads in an endless loop
fn run(&mut self) -> ! { loop {
let mut process = self.manager.pop();
unsafe { self.context.switch_to(&mut process); }
self.manager.push(process);
}}
}
```
Each CPU core repeatedly takes a thread from the run queue, runs it, and puts it back
---
### Thread module: a high-level wrapper compatible with the standard library
```rust
// thread.rs
pub fn current() -> Thread {...}
pub fn sleep(dur: Duration) {...}
pub fn spawn<F, T>(f: F) -> JoinHandle<T> {...}
pub fn yield_now() {...}
pub fn park() {...}
```
A further wrapper over the previous page.
Provides exactly the same high-level interface as the standard library's `std::thread`.
This lets multithreaded code that depends on std migrate into the kernel easily, as the sketch below shows.
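For example, this `std::thread`-style code runs unchanged on the kernel's wrapper (a sketch under that assumption):
```rust
let handle = thread::spawn(|| {
    // runs in a new kernel thread
    40 + 2
});
assert_eq!(handle.join().unwrap(), 42);
```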
---
# Ownership and RAII Simplify Resource Management
An OS must manage complex resources
Resources share and depend on one another in complex ways
---
![80%](./resources.png)
---
The process object structure:
```rust
pub struct Process {
context: Context,
kstack: KernelStack,
memory: MemorySet,
files: BTreeMap<usize, Arc<Mutex<File>>>,
...
}
```
---
Wrap each resource in an object, and release the resource in the destructor.
```rust
pub struct KernelStack {
ptr: *mut u8,
layout: Layout,
}
impl Drop for KernelStack {
fn drop(&mut self) {
unsafe{ dealloc(self.ptr, self.layout); }
}
}
```
---
When an object's lifetime ends, its resources are released automatically.
```rust
pub struct Process {
context: Context,
kstack: KernelStack,
memory: MemorySet,
files: BTreeMap<usize, Arc<Mutex<File>>>,
...
}
pub struct ProcessManager {
procs: BTreeMap<usize, Process>,
}
impl ProcessManager {
pub fn remove(&mut self, pid: usize) {
self.procs.remove(&pid);
// All resources have been released here
}
}
```
---
# Rust vs C
## Code style
||Rust|C|
|-|-|-|
|Data structures|Generic containers (Vec)|Intrusive (linked lists)|
|Global variables|Few|Many|
|Data layout|Tends to be decentralized|Tends to be centralized|
|Data types|Custom types encouraged|Primitive types|
|Way of thinking|Ownership + lifetimes|Data + behavior|
---
## Code size
||Rust|C|
|-|-|-|
|Memory management|1600|1800|
|Thread management|1200|1200|
|File system|1300|3400|
|Synchronization|500|400|
|Other kernel code|800|1200|
|Total|5400|8000|
*Lines of code counted with `loc`; rough figures based on the RISCV version*
---
## Language capability
At the low level:
* Equal power for low-level operations
* Good interoperability between the two
At the high level:
* Rust code is safer, with fewer bugs
* Rust is more expressive and handles complex logic well
* Rust abstracts more powerfully, aiding separation and reuse of code
Conclusion: Rust is better suited to writing an OS
---
# Rust's Problems
The learning curve is far too steep!
Ownership, lifetimes, and related mechanisms are hard to master!
Beginners spend most of their time fighting the compiler.
One possible approach:
* First implement things in an unsafe, C-like style
* Then gradually eliminate unsafe and refactor toward idiomatic Rust
---
# Future: What Comes Next?
* Real-hardware testing: HiFiveU, K210, and other RISCV64 boards
* Teaching labs: lab exercises for the 2019 OS course
* Feature completion: implement the POSIX interface
* Performance optimization: tap Rust's potential
* Comparison: learn from other interesting OSes
* Exploration: the async mechanism
---
## Other interesting OSes
### [Tock](https://www.tockos.org)
An embedded operating system written in Rust
Worth studying: the Capsule kernel-module design, per-process memory allocation strategy...
### [Biscuit](https://github.com/mit-pdos/biscuit)
A POSIX-compatible OS written in Go, from MIT
Worth studying: Go's async mechanism; Go vs Rust
---
## Exploration: can async (stackless coroutines) be applied to an OS kernel?
async-await: a user-space asynchronous programming mechanism
Write asynchronous code in a synchronous style.
The implementation behind it closely mirrors OS thread scheduling.
Can it be used inside the kernel? What are its pros and cons compared with traditional threads?
---
# Acknowledgments
### Advisors
陈渝,向勇
### Contributing students
王润基,戴臻旸,王纪霆
贾越凯,寇明阳,孔彦
刘辰屹,陈秋昊,朱书聪
---
# Try It Out!
![40%](demo.png)
GitHub: https://github.com/wangrunji0408/RustOS
---
# Thanks for Listening
# Q&A

@ -32,14 +32,16 @@ target := $(arch)-blog_os
kernel := target/$(target)/$(mode)/ucore
bin := target/$(target)/$(mode)/kernel.bin
bootimage := target/$(target)/bootimage.bin
user_dir := ../user
user_bin_path := $(user_dir)/target/$(arch)-ucore/debug
user_bins := $(patsubst $(user_bin_path)/%.d, $(user_bin_path)/%, $(wildcard $(user_bin_path)/*.d))
user_obj := build/$(arch)/user.o
export ARCH = $(arch)
export SFSIMG = $(user_dir)/build/user-$(arch).img
export SMP = $(smp)
#export SFSIMG = $(user_dir)/build/user-$(arch).img
ifeq ($(arch), x86_64)
export SFSIMG = $(user_dir)/img/ucore-i386.img
else
export SFSIMG = $(user_dir)/img/ucore-$(arch).img
endif
ifeq ($(arch), aarch64)
graphic ?= on
@ -209,11 +211,6 @@ endif
sfsimg:
@cd $(user_dir) && make sfsimg
# make user.o from binary files
$(user_obj): $(user_bins)
@cd $(user_bin_path) && \
$(ld) -o $(abspath $@) $(patsubst %, -b binary %, $(notdir $(user_bins)))
### install ###
ifeq ($(board), raspi3)

@ -145,6 +145,13 @@ impl Framebuffer {
use crate::arch::memory;
let paddr = info.bus_addr & !0xC0000000;
let vaddr = memory::ioremap(paddr as usize, info.screen_size as usize, "fb") as u32;
if vaddr == 0 {
Err(format!(
"cannot remap memory range [{:#x?}..{:#x?}]",
paddr,
paddr + info.screen_size
))?;
}
Ok(Framebuffer {
buf: ColorBuffer::new(color_depth, vaddr, info.screen_size),
color_depth,

@ -1,13 +1,11 @@
//! Memory initialization for aarch64.
use crate::memory::{init_heap, MemoryArea, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use crate::memory::{init_heap, Linear, MemoryAttr, MemorySet, FRAME_ALLOCATOR};
use super::paging::MMIOType;
use aarch64::paging::{memory_attribute::*, PhysFrame as Frame};
use aarch64::{addr::*, barrier, regs::*};
use atags::atags::Atags;
use lazy_static::lazy_static;
use log::*;
use spin::Mutex;
use ucore_memory::PAGE_SIZE;
/// Memory initialization.
@ -100,41 +98,31 @@ fn init_frame_allocator() {
}
}
lazy_static! {
pub static ref KERNEL_MEMORY_SET: Mutex<MemorySet> = Mutex::new(MemorySet::new_bare());
}
static mut KERNEL_MEMORY_SET: Option<MemorySet> = None;
/// remap kernel page table after all initialization.
fn remap_the_kernel() {
let mut ms = KERNEL_MEMORY_SET.lock();
ms.push(MemoryArea::new_identity(0, bootstacktop as usize, MemoryAttr::default(), "kstack"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
let mut ms = MemorySet::new_bare();
ms.push(0, bootstacktop as usize, Linear::new(0, MemoryAttr::default()), "kstack");
ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(MemoryArea::new_identity(
IO_REMAP_BASE,
IO_REMAP_END,
MemoryAttr::default().mmio(MMIOType::Device as u8),
"io_remap",
));
ms.push(IO_REMAP_BASE, IO_REMAP_END, Linear::new(0, MemoryAttr::default().mmio(MMIOType::Device as u8)), "io_remap");
unsafe { ms.get_page_table_mut().activate_as_kernel() }
unsafe { KERNEL_MEMORY_SET = Some(ms) }
info!("kernel remap end");
}
pub fn ioremap(start: usize, len: usize, name: &'static str) -> usize {
let mut ms = KERNEL_MEMORY_SET.lock();
let area = MemoryArea::new_identity(
start,
start + len,
MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8),
name,
);
ms.push(area);
start
if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
ms.push(start, start + len, Linear::new(0, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8)), name);
return start;
}
0
}
/// Returns the (start address, end address) of the available memory on this

@ -1,6 +1,4 @@
//! Page table implementations for aarch64.
use ucore_memory::memory_set::*;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;
use aarch64::asm::{tlb_invalidate, tlb_invalidate_all, flush_icache_all, ttbr_el1_read, ttbr_el1_write};
use aarch64::{PhysAddr, VirtAddr};
@ -9,7 +7,7 @@ use aarch64::paging::{FrameAllocator, FrameDeallocator, Page, PhysFrame as Frame
use aarch64::paging::memory_attribute::*;
use log::*;
// Depends on kernel
use crate::consts::RECURSIVE_INDEX;
use crate::consts::{KERNEL_PML4, RECURSIVE_INDEX};
use crate::memory::{active_table, alloc_frame, dealloc_frame};
// needs 3 pages
@ -49,9 +47,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
@ -64,27 +60,16 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
// get p1 entry
let entry_addr = ((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39)
| (vaddr & 0xffff_0000_0000_0000);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
const ROOT_PAGE_TABLE: *mut Aarch64PageTable =
((RECURSIVE_INDEX << 39) | (RECURSIVE_INDEX << 30) | (RECURSIVE_INDEX << 21) | (RECURSIVE_INDEX << 12)) as *mut Aarch64PageTable;
@ -92,18 +77,6 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(ROOT_PAGE_TABLE as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
#[repr(u8)]
@ -217,9 +190,9 @@ impl InactivePageTable for InactivePageTable0 {
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value());
@ -227,8 +200,35 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
unsafe fn set_token(token: usize) {
ttbr_el1_write(1, Frame::containing_address(PhysAddr::new(token as u64)));
}
fn active_token() -> usize {
ttbr_el1_read(1).start_address().as_u64() as usize
}
fn flush_tlb() {
tlb_invalidate_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = ttbr_el1_read(0).start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
let old_frame = ttbr_el1_read(1);
@ -238,54 +238,14 @@ impl InactivePageTable for InactivePageTable0 {
tlb_invalidate_all();
// execute f in the new context
f(active_table);
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
});
}
unsafe fn activate(&self) {
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
// Just need to switch the user TTBR
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
}
let ret = f();
debug!("switch TTBR1 {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
flush_icache_all();
}
ret
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
ret
})
}
}
@ -306,7 +266,7 @@ impl InactivePageTable0 {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -10,7 +10,7 @@ pub extern fn __mulsi3(mut a: u32, mut b: u32) -> u32 {
while a > 0 {
if a & 1 > 0 {
r += b;
r = r.overflowing_add(b).0;
}
a >>= 1;
b <<= 1;

@ -2,7 +2,7 @@ use core::{slice, mem};
use riscv::{addr::*, register::sstatus};
use ucore_memory::PAGE_SIZE;
use log::*;
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR};
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR, Linear};
use crate::consts::{MEMORY_OFFSET, MEMORY_END};
#[cfg(feature = "no_mmu")]
@ -78,11 +78,11 @@ fn init_frame_allocator() {
fn remap_the_kernel() {
let mut ms = MemorySet::new_bare();
#[cfg(feature = "no_bbl")]
ms.push(MemoryArea::new_identity(0x10000000, 0x10000008, MemoryAttr::default(), "serial"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
ms.push(0x10000000, 0x10000008, Linear::new(0, MemoryAttr::default()), "serial");
ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);

@ -31,19 +31,16 @@ pub fn setup_page_table(frame: Frame) {
p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 2, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
use riscv::register::satp;
unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
sfence_vma_all();
info!("setup init page table end");
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
pub struct PageEntry(PageTableEntry);
pub struct PageEntry(PageTableEntry, Page);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
/*
* @param:
* addr: the virtual addr to be matched
@ -53,7 +50,7 @@ impl PageTable for ActivePageTable {
* @retval:
* the matched PageEntry
*/
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// the flag for the new page entry
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
// here page is for the virtual address while frame is for the physical, both of them is 4096 bytes align
@ -86,96 +83,56 @@ impl PageTable for ActivePageTable {
* @retval:
* a mutable PageEntry reference of 'addr'
*/
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
if unsafe { !(*ROOT_PAGE_TABLE)[addr >> 22].flags().contains(EF::VALID) } {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
let p2 = unsafe { ROOT_PAGE_TABLE.as_mut().unwrap() };
let page = Page::of_addr(VirtAddr::new(addr));
if !p2[page.p2_index()].flags().contains(EF::VALID) {
return None;
}
let page = Page::of_addr(VirtAddr::new(addr));
// ???
let _ = self.0.translate_page(page);
let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22);
unsafe { Some(&mut *(entry_addr as *mut PageEntry)) }
}
/*
* @param:
* addr: the input (virtual) address
* @brief:
* get the addr's memory page slice
* @retval:
* a mutable reference slice of 'addr' 's page
*/
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/*
* @param:
* addr: virtual address
* @brief:
* get the address's content
* @retval:
* the content(u8) of 'addr'
*/
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
let entry = edit_entry_of(&page, |entry| *entry);
self.1 = PageEntry(entry, page);
Some(&mut self.1)
}
}
/*
* @param:
* addr: virtual address
* @brief:
* write the address's content
*/
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
fn edit_entry_of<T>(page: &Page, f: impl FnOnce(&mut PageTableEntry) -> T) -> T {
let p2_flags = unsafe { (*ROOT_PAGE_TABLE)[page.p2_index()].flags_mut() };
p2_flags.insert(EF::READABLE | EF::WRITABLE);
let entry_addr = (RECURSIVE_INDEX << 22) | (page.p2_index() << 12) | (page.p1_index() << 2);
let entry = unsafe { &mut *(entry_addr as *mut PageTableEntry) };
let ret = f(entry);
p2_flags.remove(EF::READABLE | EF::WRITABLE);
ret
}
impl PageTableExt for ActivePageTable {}
// define ROOT_PAGE_TABLE and its virtual address
const ROOT_PAGE_TABLE: *mut RvPageTable =
(((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap())
}
/*
* @param:
* frame: the target physical frame which will be temporarily mapped
* f: the function to apply once
* @brief:
* temporarily map the frame and apply f to it
*/
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_usize(), frame.start_address().as_u32() as usize);
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::zeroed()
)
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
fn update(&mut self) {
let addr = VirtAddr::new((self as *const _ as usize) << 10);
sfence_vma(0, addr);
edit_entry_of(&self.1, |entry| *entry = self.0);
sfence_vma(0, self.1.start_address());
}
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
fn present(&self) -> bool { self.0.flags().contains(EF::VALID | EF::READABLE) }
fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); }
fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); }
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID | EF::READABLE, value); }
fn clear_accessed(&mut self) { self.0.flags_mut().remove(EF::ACCESSED); }
fn clear_dirty(&mut self) { self.0.flags_mut().remove(EF::DIRTY); }
fn set_writable(&mut self, value: bool) { self.0.flags_mut().set(EF::WRITABLE, value); }
fn set_present(&mut self, value: bool) { self.0.flags_mut().set(EF::VALID | EF::READABLE, value); }
fn target(&self) -> usize { self.0.addr().as_u32() as usize }
fn set_target(&mut self, target: usize) {
let flags = self.0.flags();
@ -185,27 +142,21 @@ impl Entry for PageEntry {
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED2) }
fn set_shared(&mut self, writable: bool) {
let flags = self.as_flags();
let flags = self.0.flags_mut();
flags.set(EF::RESERVED1, writable);
flags.set(EF::RESERVED2, !writable);
}
fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); }
fn clear_shared(&mut self) { self.0.flags_mut().remove(EF::RESERVED1 | EF::RESERVED2); }
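// RESERVED1/RESERVED2 are the Sv32 PTE's RSW bits (reserved for software),
// so borrowing them for shared/swapped bookkeeping is spec-clean. Note the
// overlap: `swapped` below reuses RESERVED1, so the shared and swapped
// states are mutually exclusive by convention.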
fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::RESERVED1, value); }
fn set_swapped(&mut self, value: bool) { self.0.flags_mut().set(EF::RESERVED1, value); }
fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
fn set_user(&mut self, value: bool) { self.as_flags().set(EF::USER, value); }
fn set_user(&mut self, value: bool) { self.0.flags_mut().set(EF::USER, value); }
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::EXECUTABLE, value); }
fn set_execute(&mut self, value: bool) { self.0.flags_mut().set(EF::EXECUTABLE, value); }
fn mmio(&self) -> u8 { 0 }
fn set_mmio(&mut self, _value: u8) { }
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
p2_frame: Frame,
@ -214,145 +165,72 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
/*
* @brief:
* get a new page table (for a new process or thread)
* @retval:
* the new pagetable
*/
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/*
* @brief:
* allocate a new frame, set up its recursive self-mapping, and use it as the root of the inactive page table
* @retval:
* the inactive page table
*/
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target as u32));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { p2_frame: frame }
}
/*
* @param:
* f: a function that edits the temporarily modified active page table
* @brief:
* temporarily install this table's p2 frame into the recursive slot and apply f to the resulting active page table
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
f(active_table);
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
let e2 = table[KERNEL_P2_INDEX + 1];
let e3 = table[KERNEL_P2_INDEX + 2];
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX] = e1;
table[KERNEL_P2_INDEX + 1] = e2;
table[KERNEL_P2_INDEX + 2] = e3;
});
}
/*
* @brief:
* activate self as the current page table
*/
unsafe fn activate(&self) {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
}
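// satp layout on RV32, for reference: bit 31 = MODE (1 enables Sv32),
// bits 21:0 = PPN of the root table; `frame.number() | (1 << 31)` is thus a
// complete satp value, ready for `set_token` below.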
/*
* @param:
* f: the function to run while self is temporarily activated
* @brief:
* temporarily activate self, run f, then restore the previous page table
* @retval:
* the return value of f
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
let target = f();
debug!("switch table {:x?} -> {:x?}", new_frame, old_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, old_frame);
sfence_vma_all();
}
target
unsafe fn set_token(token: usize) {
asm!("csrw 0x180, $0" :: "r"(token) :: "volatile");
}
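// 0x180 is the CSR address of `satp`, so this raw `csrw` is equivalent to
// the typed `satp::set(Mode::Sv32, 0, frame)` used elsewhere in this file,
// just taking a prebuilt token instead of a Frame.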
/*
* @brief:
* get the token of self; the token is the root frame number encoded as a satp value
* @retval:
* self token
*/
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
fn active_token() -> usize {
satp::read().bits()
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
fn flush_tlb() {
sfence_vma_all();
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_u32() as usize;
active_table().with_temporary_map(target, |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
impl InactivePageTable0 {
/*
* @brief:
* map the kernel memory regions (p2 entries) into the new inactive page table, copied from the current active page table
*/
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
assert!(!e1.is_unused());
// for larger heap memory
let e2 = table[KERNEL_P2_INDEX + 1];
assert!(!e2.is_unused());
let e3 = table[KERNEL_P2_INDEX + 2];
assert!(!e3.is_unused());
// overwrite recursive mapping
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
// for larger heap memory
table[KERNEL_P2_INDEX + 1].set(e2.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
ret
})
}
}
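// The `edit` above, reduced to its essential moves (sketch, not a drop-in):
//
//     let backup = p2_table[RECURSIVE_INDEX].clone();
//     p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID); // redirect
//     sfence_vma_all();                   // flush stale translations
//     let ret = f(active_table);          // recursive accesses now hit the inactive table
//     p2_table[RECURSIVE_INDEX] = backup; // restore
//     sfence_vma_all();
//     ret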
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
}
}

@ -1,4 +1,3 @@
use bit_allocator::{BitAlloc, BitAlloc64K};
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use spin::{Mutex, MutexGuard};
@ -44,9 +43,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
.unwrap().flush();
@ -58,44 +55,21 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } { return None; }
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
}
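// `get_entry_ptr` is defined outside this hunk; a minimal sketch of the
// shape it presumably has under x86_64 recursive paging (slot 511): each
// pass through the recursion peels one paging level off the address.
fn get_entry_ptr_sketch(addr: usize, count: usize) -> *const u64 {
    const RECURSIVE: usize = 0o777; // recursive slot 511
    const SIGN_EXT: usize = 0xFFFF_0000_0000_0000; // keep the address canonical
    let mut v = addr;
    for _ in 0..count {
        v = (v >> 9) | (RECURSIVE << 39) | SIGN_EXT;
    }
    (v & !0x7) as *const u64 // page-table entries are 8 bytes wide
}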
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
impl Entry for PageEntry {
@ -166,16 +140,10 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut x86PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
@ -183,78 +151,58 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
}
unsafe fn activate(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
let ret = f();
debug!("switch table {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
Cr3::write(old_frame, Cr3Flags::empty());
}
ret
unsafe fn set_token(token: usize) {
Cr3::write(Frame::containing_address(PhysAddr::new(token as u64)), Cr3Flags::empty());
}
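// Round trip: `token()` returns the P4 frame's physical start address, and
// `set_token` rebuilds the Frame (the address is 4 KiB aligned, so
// `containing_address` is exact) and writes it to CR3; hence
// `set_token(pt.token())` activates `pt`.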
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
fn active_token() -> usize {
Cr3::read().0.start_address().as_u64() as usize
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
fn flush_tlb() {
tlb::flush_all();
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = Cr3::read().0.start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
ret
})
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -4,7 +4,7 @@ use crate::consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, InactivePageTable};
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, handler::*};
use ucore_memory::swap::*;
use crate::process::{process};
use crate::sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
@ -46,33 +46,29 @@ pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>, SpinNoIrq>
ACTIVE_TABLE.lock()
}
// Page table for swap in and out
lazy_static!{
static ref ACTIVE_TABLE_SWAP: SpinNoIrqLock<SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>> =
SpinNoIrqLock::new(unsafe{SwapExt::new(ActivePageTable::new(), fifo::FifoSwapManager::default(), mock_swapper::MockSwapper::default())});
}
pub fn active_table_swap() -> MutexGuard<'static, SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>, SpinNoIrq>{
ACTIVE_TABLE_SWAP.lock()
#[derive(Debug, Clone, Copy)]
pub struct GlobalFrameAlloc;
impl FrameAllocator for GlobalFrameAlloc {
fn alloc(&self) -> Option<usize> {
// compute the real (physical) address of the allocated frame
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
ret
// TODO: try to swap out when alloc failed
}
fn dealloc(&self, target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
}
}
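// With the allocator reified as a zero-sized type, callers pass it by value
// wherever a FrameAllocator is expected (e.g. `ByFrame::new(
// MemoryAttr::default().user(), GlobalFrameAlloc)` in process creation), or
// call it directly:
//
//     let frame = GlobalFrameAlloc.alloc().expect("no free frame");
//     GlobalFrameAlloc.dealloc(frame);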
/*
* @brief:
* allocate a free physical frame; if none is free, swap out one page and return its frame as the free one
* @retval:
* the physical address for the allocated frame
*/
pub fn alloc_frame() -> Option<usize> {
// compute the real (physical) address of the allocated frame
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
// open question: is unsafe { ACTIVE_TABLE_SWAP.force_unlock(); } needed here?
Some(ret.unwrap_or_else(|| active_table_swap().swap_out_any::<InactivePageTable0>().ok().expect("fail to swap out page")))
GlobalFrameAlloc.alloc()
}
pub fn dealloc_frame(target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
GlobalFrameAlloc.dealloc(target);
}
pub struct KernelStack(usize);
@ -97,44 +93,12 @@ impl Drop for KernelStack {
}
/*
* @param:
* addr: the virtual address of the page fault
* @brief:
* handle page fault
* @retval:
* Return true to continue, false to halt
*/
/// Handle page fault at `addr`.
/// Return true to continue, false to halt.
#[cfg(not(feature = "no_mmu"))]
pub fn page_fault_handler(addr: usize) -> bool {
info!("start handling swap in/out page fault");
//unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
/*LAB3 EXERCISE 1: YOUR STUDENT NUMBER
* handle the frame deallocated
*/
info!("get pt from processor()");
if process().memory_set.find_area(addr).is_none(){
return false;
}
let pt = process().memory_set.get_page_table_mut();
info!("pt got");
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, true, || alloc_frame().expect("fail to alloc frame")){
return true;
}
//////////////////////////////////////////////////////////////////////////////
// Handle copy on write (not being used now)
/*
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().expect("fail to alloc frame")){
return true;
}
*/
false
process().memory_set.page_fault_handler(addr)
}
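// After this refactor the fault policy lives with each mapping: MemorySet
// dispatches to the faulting area's handler (ByFrame, Delay, ...) instead of
// this free function driving the swap machinery directly.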
pub fn init_heap() {

@ -1,18 +1,16 @@
use crate::arch::interrupt::{TrapFrame, Context as ArchContext};
use crate::memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame, InactivePageTable0};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type, SegmentData}};
use core::fmt::{Debug, Error, Formatter};
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec, sync::Arc, string::String};
use ucore_memory::{Page};
use ucore_memory::memory_set::*;
use ucore_process::Context;
use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, vec::Vec};
use log::*;
use simple_filesystem::file::File;
use spin::Mutex;
use log::*;
use ucore_process::Context;
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, SegmentData, Type}};
use crate::arch::interrupt::{Context as ArchContext, TrapFrame};
use crate::memory::{ByFrame, Delay, FrameAllocator, GlobalFrameAlloc, KernelStack, MemoryArea, MemoryAttr, MemorySet};
// TODO: avoid pub
pub struct ContextImpl {
pub struct Process {
pub arch: ArchContext,
pub memory_set: MemorySet,
pub kstack: KernelStack,
@ -20,17 +18,17 @@ pub struct ContextImpl {
pub cwd: String,
}
impl Context for ContextImpl {
impl Context for Process {
unsafe fn switch_to(&mut self, target: &mut Context) {
use core::mem::transmute;
let (target, _): (&mut ContextImpl, *const ()) = transmute(target);
let (target, _): (&mut Process, *const ()) = transmute(target);
self.arch.switch(&mut target.arch);
}
}
impl ContextImpl {
impl Process {
pub unsafe fn new_init() -> Box<Context> {
Box::new(ContextImpl {
Box::new(Process {
arch: ArchContext::null(),
memory_set: MemorySet::new(),
kstack: KernelStack::new(),
@ -42,7 +40,7 @@ impl ContextImpl {
pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
let memory_set = MemorySet::new();
let kstack = KernelStack::new();
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) },
memory_set,
kstack,
@ -52,7 +50,7 @@ impl ContextImpl {
}
/// Make a new user thread from ELF data
pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<ContextImpl>
pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<Process>
where Iter: Iterator<Item=&'a str>
{
// Parse elf
@ -82,7 +80,7 @@ impl ContextImpl {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
};
memory_set.push(MemoryArea::new(ustack_buttom, ustack_top, MemoryAttr::default().user(), "user_stack"));
memory_set.push(ustack_buttom, ustack_top, ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
ustack_top
};
#[cfg(feature = "no_mmu")]
@ -96,10 +94,7 @@ impl ContextImpl {
let kstack = KernelStack::new();
// set the user memory pages in the memory set swappable
memory_set_map_swappable(&mut memory_set);
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe {
ArchContext::new_user_thread(
entry_addr, ustack_top, kstack.top(), is32, memory_set.token())
@ -131,10 +126,7 @@ impl ContextImpl {
info!("temporary copy data!");
let kstack = KernelStack::new();
memory_set_map_swappable(&mut memory_set);
info!("FORK() finsihed!");
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
memory_set,
kstack,
@ -144,33 +136,6 @@ impl ContextImpl {
}
}
#[cfg(not(feature = "no_mmu"))]
impl Drop for ContextImpl {
fn drop(&mut self){
info!("come in to drop for ContextImpl");
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set, ref mut kstack, ..} = self;
let pt = {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe {
active_table_swap().remove_from_swappable(pt, addr, || alloc_frame().expect("alloc frame failed"));
}
}
}
debug!("Finishing setting pages unswappable");
}
}
impl Debug for ContextImpl {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:x?}", self.arch)
}
}
/// Push a slice at the stack. Return the new sp.
unsafe fn push_slice<T: Copy>(mut sp: usize, vs: &[T]) -> usize {
use core::{mem::{size_of, align_of}, slice};
@ -200,7 +165,7 @@ unsafe fn push_args_at_stack<'a, Iter>(args: Iter, stack_top: usize) -> usize
/// Generate a MemorySet according to the ELF file.
/// Also return the real entry point address.
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> (MemorySet, usize) {
fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
debug!("come in to memory_set_from");
let mut ms = MemorySet::new();
let mut entry = None;
@ -221,7 +186,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> (MemorySet, usize) {
let target = ms.push(mem_size);
#[cfg(not(feature = "no_mmu"))]
let target = {
ms.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(ph.flags()), ""));
ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), "");
unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
};
// Copy data
@ -248,29 +213,3 @@ fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}
/*
* @param:
* memory_set: the target MemorySet to set swappable
* @brief:
* mark the memory areas in the memory_set swappable, especially for the user process
*/
#[cfg(not(feature = "no_mmu"))]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
info!("COME INTO memory set map swappable!");
let pt = unsafe {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe { active_table_swap().set_swappable(pt, addr); }
}
}
info!("Finishing setting pages swappable");
}
#[cfg(feature = "no_mmu")]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
// NOTE: This function may disappear after refactor memory crate
}

@ -1,5 +1,5 @@
use spin::Mutex;
pub use self::context::ContextImpl;
pub use self::context::Process;
pub use ucore_process::*;
use crate::consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
use crate::arch::cpu;
@ -17,15 +17,18 @@ pub fn init() {
unsafe {
for cpu_id in 0..MAX_CPU_NUM {
PROCESSORS[cpu_id].init(cpu_id, ContextImpl::new_init(), manager.clone());
PROCESSORS[cpu_id].init(cpu_id, Process::new_init(), manager.clone());
}
}
// Add idle threads
extern fn idle(_arg: usize) -> ! {
loop { cpu::halt(); }
}
for i in 0..4 {
manager.add(ContextImpl::new_kernel(idle, i), 0);
use core::str::FromStr;
let cores = usize::from_str(env!("SMP")).unwrap();
for i in 0..cores {
manager.add(Process::new_kernel(idle, i), 0);
}
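// `env!("SMP")` is expanded at compile time, so the idle-thread count is
// baked into the binary; presumably the build exports it, along the lines
// of `make run SMP=4` (the exact plumbing is not shown in this diff).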
crate::shell::run_user_shell();
@ -35,9 +38,11 @@ pub fn init() {
static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()];
/// Get current thread struct
pub fn process() -> &'static mut ContextImpl {
///
/// FIXME: It's obviously unsafe to get &mut !
pub fn process() -> &'static mut Process {
use core::mem::transmute;
let (process, _): (&mut ContextImpl, *const ()) = unsafe {
let (process, _): (&mut Process, *const ()) = unsafe {
transmute(processor().context())
};
process
@ -53,5 +58,5 @@ pub fn processor() -> &'static Processor {
#[no_mangle]
pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
ContextImpl::new_kernel(entry, arg)
Process::new_kernel(entry, arg)
}

@ -7,10 +7,12 @@ use crate::process::*;
pub fn run_user_shell() {
if let Ok(inode) = ROOT_INODE.lookup("sh") {
println!("Going to user mode shell.");
println!("Use 'ls' to list available programs.");
let data = inode.read_as_vec().unwrap();
processor().manager().add(ContextImpl::new_user(data.as_slice(), "sh".split(' ')), 0);
processor().manager().add(Process::new_user(data.as_slice(), "sh".split(' ')), 0);
} else {
processor().manager().add(ContextImpl::new_kernel(shell, 0), 0);
processor().manager().add(Process::new_kernel(shell, 0), 0);
}
}
@ -27,7 +29,7 @@ pub extern fn shell(_arg: usize) -> ! {
let name = cmd.split(' ').next().unwrap();
if let Ok(file) = ROOT_INODE.lookup(name) {
let data = file.read_as_vec().unwrap();
let pid = processor().manager().add(ContextImpl::new_user(data.as_slice(), cmd.split(' ')), thread::current().id());
let pid = processor().manager().add(Process::new_user(data.as_slice(), cmd.split(' ')), thread::current().id());
unsafe { thread::JoinHandle::<()>::_of(pid) }.join().unwrap();
} else {
println!("Program not exist");

@ -208,7 +208,7 @@ fn sys_exec(name: *const u8, argc: usize, argv: *const *const u8, tf: &mut TrapF
// Make new Context
let iter = args.iter().map(|s| s.as_str());
let mut context = ContextImpl::new_user(buf.as_slice(), iter);
let mut context = Process::new_user(buf.as_slice(), iter);
// Activate new page table
unsafe { context.memory_set.activate(); }
