Merge branch 'master' into rust-rv64

# Conflicts:
#	crate/memory/src/memory_set.rs
#	kernel/src/arch/riscv32/compiler_rt.rs
#	kernel/src/arch/riscv32/memory.rs
#	kernel/src/arch/riscv32/paging.rs
#	kernel/src/memory.rs
#	kernel/src/process/mod.rs
toolchain_update
WangRunji 6 years ago
commit a852c96136

@ -8,10 +8,12 @@ Going to be the next generation teaching operating system.
Supported architectures: x86_64, RISCV32IMA(S/M), AArch64
Tested boards: QEMU, Raspberry Pi 3B+
Tested boards: QEMU, labeled-RISCV, Raspberry Pi 3B+
[Dev docs](https://rucore.gitbook.io/rust-os-docs/) (in Chinese)
![demo](./docs/2_OSLab/os2atc/demo.png)
## Summary
This is a project of THU courses:

@ -2,10 +2,11 @@
name = "bcm2837"
version = "0.1.0"
authors = ["equation314 <equation618@gmail.com>"]
edition = "2018"
[features]
use_generic_timer = []
use_generic_timer = ["aarch64"]
[dependencies]
volatile = "0.2.4"
cortex-a = "2.2.2"
aarch64= { git = "https://github.com/equation314/aarch64", optional = true }

@ -1,5 +1,5 @@
use IO_BASE;
use timer::delay;
use crate::IO_BASE;
use crate::timer::delay;
use core::marker::PhantomData;
use volatile::{ReadOnly, Volatile, WriteOnly};

@ -1,9 +1,10 @@
use IO_BASE;
use crate::IO_BASE;
use volatile::{ReadOnly, Volatile};
const INT_BASE: usize = IO_BASE + 0xB000 + 0x200;
/// Allowed interrupts (ref: peripherals 7.5, page 113)
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Interrupt {
Timer1 = 1,
@ -30,6 +31,24 @@ struct Registers {
DisableBasicIRQ: Volatile<u32>,
}
/// A bitmask of pending interrupts.
///
/// Iterating yields the indices of the set bits in ascending order,
/// consuming the mask as it goes.
pub struct PendingInterrupts(u64);

impl Iterator for PendingInterrupts {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // trailing_zeros() == 64 means no bit is set any more.
        let bit = self.0.trailing_zeros();
        if bit >= 64 {
            return None;
        }
        // Clear the bit we are about to report so it is yielded only once.
        self.0 &= !(1u64 << bit);
        Some(bit as usize)
    }
}
/// An interrupt controller. Used to enable and disable interrupts as well as to
/// check if an interrupt is pending.
pub struct Controller {
@ -38,6 +57,7 @@ pub struct Controller {
impl Controller {
/// Returns a new handle to the interrupt controller.
#[inline]
pub fn new() -> Controller {
Controller {
registers: unsafe { &mut *(INT_BASE as *mut Registers) },
@ -58,4 +78,11 @@ impl Controller {
pub fn is_pending(&self, int: Interrupt) -> bool {
self.registers.IRQPending[int as usize / 32].read() & (1 << (int as usize) % 32) != 0
}
/// Return all pending interrupts.
pub fn pending_interrupts(&self) -> PendingInterrupts {
let irq1 = self.registers.IRQPending[0].read() as u64;
let irq2 = self.registers.IRQPending[1].read() as u64;
PendingInterrupts((irq2 << 32) | irq1)
}
}

@ -1,5 +1,5 @@
use IO_BASE;
use gpio::{Function, Gpio};
use crate::IO_BASE;
use crate::gpio::{Function, Gpio};
use volatile::{ReadOnly, Volatile};
/// The base address for the `MU` registers.
@ -8,6 +8,13 @@ const MU_REG_BASE: usize = IO_BASE + 0x215040;
/// The `AUXENB` register from page 9 of the BCM2837 documentation.
const AUX_ENABLES: *mut Volatile<u8> = (IO_BASE + 0x215004) as *mut Volatile<u8>;
/// Enum representing bit fields of the `AUX_MU_IIR_REG` register.
///
/// The values are compared against `IIR & 0b110`, i.e. the interrupt-ID
/// bits of the register (see `interrupt_is_pending`).
#[repr(u8)]
pub enum MiniUartInterruptId {
/// Transmit interrupt pending (IIR id bits = 0b01).
Transmit = 0b010,
/// Receive interrupt pending (IIR id bits = 0b10).
// NOTE(review): variant name is a typo for `Receive`; renaming would break
// existing callers, so it is kept as-is.
Recive = 0b100,
}
/// Enum representing bit fields of the `AUX_MU_LSR_REG` register.
#[repr(u8)]
enum LsrStatus {
@ -15,7 +22,7 @@ enum LsrStatus {
TxAvailable = 1 << 5,
}
/// MU registers starting from `AUX_ENABLES` (ref: peripherals 2.1, page 8)
/// MU registers starting from `MU_REG_BASE` (ref: peripherals 2.1, page 8)
#[repr(C)]
#[allow(non_snake_case)]
struct Registers {
@ -62,23 +69,25 @@ impl MiniUart {
&mut *(MU_REG_BASE as *mut Registers)
};
Gpio::new(14).into_alt(Function::Alt5).set_gpio_pd(0);
Gpio::new(15).into_alt(Function::Alt5).set_gpio_pd(0);
registers.AUX_MU_CNTL_REG.write(0); // Disable auto flow control and disable receiver and transmitter (for now)
registers.AUX_MU_IER_REG.write(0); // Disable receive and transmit interrupts
registers.AUX_MU_LCR_REG.write(3); // Enable 8 bit mode
registers.AUX_MU_MCR_REG.write(0); // Set RTS line to be always high
registers.AUX_MU_BAUD_REG.write(270); // Set baud rate to 115200
registers.AUX_MU_CNTL_REG.write(3); // Finally, enable transmitter and receiver
MiniUart {
registers: registers,
timeout: None,
}
}
pub fn init(&mut self) {
Gpio::new(14).into_alt(Function::Alt5).set_gpio_pd(0);
Gpio::new(15).into_alt(Function::Alt5).set_gpio_pd(0);
self.registers.AUX_MU_CNTL_REG.write(0); // Disable auto flow control and disable receiver and transmitter (for now)
self.registers.AUX_MU_IER_REG.write(1); // Enable receive interrupts and disable transmit interrupts
self.registers.AUX_MU_LCR_REG.write(3); // Enable 8 bit mode
self.registers.AUX_MU_MCR_REG.write(0); // Set RTS line to be always high
self.registers.AUX_MU_BAUD_REG.write(270); // Set baud rate to 115200
self.registers.AUX_MU_CNTL_REG.write(3); // Finally, enable transmitter and receiver
}
/// Set the read timeout to `milliseconds` milliseconds.
pub fn set_read_timeout(&mut self, milliseconds: u32) {
self.timeout = Some(milliseconds)
@ -111,8 +120,13 @@ impl MiniUart {
}
/// Reads a byte. Blocks indefinitely until a byte is ready to be read.
pub fn read_byte(&mut self) -> u8 {
pub fn read_byte(&self) -> u8 {
while !self.has_byte() {}
self.registers.AUX_MU_IO_REG.read()
}
// Read `AUX_MU_IIR_REG` and determine if the interrupt `id` is pending.
pub fn interrupt_is_pending(&self, id: MiniUartInterruptId) -> bool {
self.registers.AUX_MU_IIR_REG.read() & 0b110 == id as u8
}
}

@ -1,6 +1,6 @@
extern crate cortex_a;
extern crate aarch64;
use self::cortex_a::regs::*;
use aarch64::regs::*;
use volatile::*;
/// The base address for the ARM generic timer, IRQs, mailboxes
@ -58,7 +58,7 @@ impl Timer {
/// Reads the generic timer's counter and returns the 64-bit counter value.
/// The returned value is the number of elapsed microseconds.
pub fn read(&self) -> u64 {
let cntfrq = CNTFRQ_EL0.get();
let cntfrq = CNTFRQ_EL0.get(); // 62500000
(CNTPCT_EL0.get() * 1000000 / (cntfrq as u64)) as u64
}
@ -66,7 +66,7 @@ impl Timer {
/// interrupts for timer 1 are enabled and IRQs are unmasked, then a timer
/// interrupt will be issued in `us` microseconds.
pub fn tick_in(&mut self, us: u32) {
let cntfrq = CNTFRQ_EL0.get();
let cntfrq = CNTFRQ_EL0.get(); // 62500000
CNTP_TVAL_EL0.set(((cntfrq as f64) * (us as f64) / 1000000.0) as u32);
}

@ -1,6 +1,6 @@
use ::IO_BASE;
use crate::IO_BASE;
use crate::interrupt::{Controller, Interrupt};
use volatile::{ReadOnly, Volatile};
use interrupt::{Controller, Interrupt};
/// The base address for the ARM system timer registers.
const TIMER_REG_BASE: usize = IO_BASE + 0x3000;

@ -1,7 +1,6 @@
#![cfg_attr(not(test), no_std)]
#![feature(alloc)]
#![feature(nll)]
#![feature(extern_crate_item_prelude)]
// import macros from log
use log::*;

@ -1,418 +0,0 @@
//! memory set, area
//! and the inactive page table
use alloc::vec::Vec;
use core::fmt::{Debug, Error, Formatter};
use super::*;
use crate::paging::*;
/// An inactive page table: a page table that is not currently loaded by the
/// MMU. Note that an `InactivePageTable` is not itself a `PageTable`, but it
/// can be activated (or borrowed temporarily via `edit`/`with`) to become one.
// NOTE(review): the original author questioned whether this trait belongs in
// this file rather than paging/mod.rs; kept here to preserve the layout.
pub trait InactivePageTable {
/// The active (currently-loaded) page table type this can become.
type Active: PageTable;
/// Create an inactive page table with kernel memory mapped.
fn new() -> Self;
/// Create an inactive page table without kernel memory mapped.
fn new_bare() -> Self;
/// Temporarily activate this page table and edit it through `f`,
/// which receives the temporarily activated table.
fn edit(&mut self, f: impl FnOnce(&mut Self::Active));
/// Switch the MMU to this page table.
///
/// # Safety
/// The caller must ensure the new mappings keep the currently executing
/// code and stack valid.
unsafe fn activate(&self);
/// Run `f` with this page table activated, restoring the previous table
/// afterwards, and return `f`'s result.
///
/// # Safety
/// Same requirements as `activate`.
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T;
/// Get the token of this page table (an arch-specific value identifying
/// the table, e.g. its root frame — confirm per architecture).
fn token(&self) -> usize;
// NOTE(review): the frame alloc/dealloc methods below arguably belong on a
// separate allocator trait (the original author raised the same question);
// kept here to preserve the interface.
/// Allocate a physical frame, returning the physical address of its
/// beginning if one is available.
fn alloc_frame() -> Option<PhysAddr>;
/// Deallocate the physical frame beginning at `target`.
fn dealloc_frame(target: PhysAddr);
}
/// A continuous virtual memory space with the same attributes,
/// like `vma_struct` in ucore.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub struct MemoryArea {
/// Virtual address of the first byte of the area (inclusive).
start_addr: VirtAddr,
/// Virtual address one past the last byte of the area (exclusive).
end_addr: VirtAddr,
/// Physical start address for linearly-mapped areas; `None` means frames
/// are allocated when the area is mapped.
phys_start_addr: Option<PhysAddr>,
/// Attributes applied to every page table entry of the area.
flags: MemoryAttr,
/// Human-readable name, used for debugging.
name: &'static str,
}
impl MemoryArea {
/// Create a memory area `[start_addr, end_addr)` whose backing frames are
/// allocated when the area is mapped (`phys_start_addr: None`).
///
/// Panics if `start_addr > end_addr`.
pub fn new(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: None, flags, name }
}
/// Create an identically-mapped memory area (virtual address == physical
/// address on riscv32).
// TODO: VirtAddr and PhysAddr should not be the same `usize`, it's not type safe.
#[cfg(target_arch = "riscv32")]
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
MemoryArea { start_addr, end_addr, phys_start_addr: Some(start_addr), flags, name }
}
/// riscv64 variant of `new_identity`: the physical address is derived by
/// subtracting the kernel linear-mapping offset.
// NOTE(review): assumes the rv64 kernel is linearly mapped at
// 0xFFFF_FFFF_0000_0000 — confirm against the arch memory layout.
#[cfg(target_arch = "riscv64")]
pub fn new_identity(start_addr: VirtAddr, end_addr: VirtAddr, flags: MemoryAttr, name: &'static str) -> Self {
assert!(start_addr <= end_addr, "invalid memory area");
let paddr = start_addr - 0xFFFF_FFFF_0000_0000;
MemoryArea { start_addr, end_addr, phys_start_addr: Some(paddr), flags, name }
}
/// Create a memory area from a physical range `[phys_start_addr,
/// phys_end_addr)`, mapped at virtual address `physical + offset`.
pub fn new_physical(phys_start_addr: PhysAddr, phys_end_addr: PhysAddr, offset: usize, flags: MemoryAttr, name: &'static str) -> Self {
let start_addr = phys_start_addr + offset;
let end_addr = phys_end_addr + offset;
assert!(start_addr <= end_addr, "invalid memory area");
let phys_start_addr = Some(phys_start_addr);
MemoryArea { start_addr, end_addr, phys_start_addr, flags, name }
}
/// View the area's content as a byte slice.
///
/// # Safety
/// The area must be mapped and readable in the current address space.
pub unsafe fn as_slice(&self) -> &[u8] {
use core::slice;
slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/// View the area's content as a mutable byte slice.
///
/// # Safety
/// The area must be mapped and writable in the current address space.
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
use core::slice;
slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/// Test whether the virtual address `addr` lies inside this area.
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
/// Test whether this area overlaps `other`, at page granularity.
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
// Compare half-open page ranges [p0, p1) and [p2, p3).
let p0 = Page::of_addr(self.start_addr);
let p1 = Page::of_addr(self.end_addr - 1) + 1;
let p2 = Page::of_addr(other.start_addr);
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/// Map every page of the area into page table `pt`: linearly when a
/// physical start address is set, otherwise backed by freshly allocated
/// frames. Panics if frame allocation fails.
fn map<T: InactivePageTable>(&self, pt: &mut T::Active) {
match self.phys_start_addr {
Some(phys_start) => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = page.start_address() - self.start_addr + phys_start;
self.flags.apply(pt.map(addr, target));
}
}
None => {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
let target = T::alloc_frame().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
// for frame delayed allocation
// let entry = pt.map(addr,0);
// self.flags.apply(entry);
// let entry = pt.get_entry(addr).expect("fail to get entry");
// entry.set_present(false);
// entry.update();
}
}
};
}
/// Unmap every page of the area from page table `pt`, freeing frames that
/// were allocated by `map` (i.e. when `phys_start_addr` is `None`).
fn unmap<T: InactivePageTable>(&self, pt: &mut T::Active) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let addr = page.start_address();
if self.phys_start_addr.is_none() {
if pt.get_entry(addr).expect("fail to get entry").present(){
let target = pt.get_entry(addr).expect("fail to get entry").target();
T::dealloc_frame(target);
}
else{
// Entry exists but is marked not-present (delayed allocation):
// set valid so pt.unmap can remove it.
pt.get_entry(addr).expect("fail to get entry").set_present(true);
}
}
pt.unmap(addr);
}
}
/// Virtual start address (inclusive).
pub fn get_start_addr(&self) -> VirtAddr {
self.start_addr
}
/// Virtual end address (exclusive).
pub fn get_end_addr(&self) -> VirtAddr{
self.end_addr
}
/// The area's memory attributes.
pub fn get_flags(&self) -> &MemoryAttr{
&self.flags
}
}
/// The attributes of the memory, applied to page table entries when an area
/// is mapped. All flags default to `false`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
/// Accessible from user mode.
user: bool,
/// Not writable.
readonly: bool,
/// Executable.
execute: bool,
/// Memory-mapped I/O region.
mmio: bool,
/// Entry is marked not-present after mapping.
hide: bool,
}
impl MemoryAttr {
    /// Builder: mark the memory as user-accessible.
    pub fn user(mut self) -> Self {
        self.user = true;
        self
    }

    /// Builder: mark the memory as read-only.
    pub fn readonly(mut self) -> Self {
        self.readonly = true;
        self
    }

    /// Builder: mark the memory as executable.
    pub fn execute(mut self) -> Self {
        self.execute = true;
        self
    }

    /// Builder: mark the memory as memory-mapped I/O.
    pub fn mmio(mut self) -> Self {
        self.mmio = true;
        self
    }

    /// Builder: mark the entry as hidden (not-present after mapping).
    pub fn hide(mut self) -> Self {
        self.hide = true;
        self
    }

    /// Apply these attributes to a page table entry.
    ///
    /// `entry.update()` is invoked only when at least one attribute is set,
    /// so a default `MemoryAttr` leaves the entry untouched.
    fn apply(&self, entry: &mut impl Entry) {
        let mut changed = false;
        if self.user {
            entry.set_user(true);
            changed = true;
        }
        if self.readonly {
            entry.set_writable(false);
            changed = true;
        }
        if self.execute {
            entry.set_execute(true);
            changed = true;
        }
        if self.mmio {
            entry.set_mmio(true);
            changed = true;
        }
        if self.hide {
            entry.set_present(false);
            changed = true;
        }
        if changed {
            entry.update();
        }
    }
}
/// A set of memory areas with an associated page table,
/// like `mm_struct` in ucore.
pub struct MemorySet<T: InactivePageTable> {
/// Non-overlapping memory areas belonging to this address space.
areas: Vec<MemoryArea>,
/// The (inactive) page table holding the mappings of all areas.
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/// Create an empty memory set with a page table that has kernel memory
/// mapped (via `T::new`).
pub fn new() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
page_table: T::new(),
}
}
/// Create an empty memory set with a bare page table (no kernel memory).
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::<MemoryArea>::new(),
page_table: T::new_bare()
}
}
/// Find the memory area containing the virtual address `addr`, if any.
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
self.areas.iter().find(|area| area.contains(addr))
}
/// Add a memory area to the set and map it into the page table.
///
/// Panics if `area` overlaps an existing area.
pub fn push(&mut self, area: MemoryArea) {
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
self.page_table.edit(|pt| area.map::<T>(pt));
self.areas.push(area);
}
/// Iterate over the memory areas of this set.
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
/// Edit the underlying page table directly through `f`.
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/// Execute `f` with this set's page table activated.
///
/// # Safety
/// See `InactivePageTable::with`.
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/// Activate this set's page table.
///
/// # Safety
/// See `InactivePageTable::activate`.
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/// Get the token of the associated page table.
pub fn token(&self) -> usize {
self.page_table.token()
}
/// Unmap all areas and remove them from the set.
pub fn clear(&mut self) {
// Destructure to borrow the page table and the areas simultaneously.
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap::<T>(pt);
}
});
areas.clear();
}
/// Get a mutable reference to the inactive page table.
pub fn get_page_table_mut(&mut self) -> &mut T{
&mut self.page_table
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {
/// Clone the memory set: build a fresh kernel-mapped page table and re-map
/// every area into it.
// NOTE(review): for frame-backed areas this maps *new* frames; the frame
// contents are not copied here — confirm callers copy data separately.
fn clone(&self) -> Self {
let mut page_table = T::new();
page_table.edit(|pt| {
for area in self.areas.iter() {
area.map::<T>(pt);
}
});
MemorySet {
areas: self.areas.clone(),
page_table,
}
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
/// Unmap all areas (freeing owned frames) when the set is dropped.
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
/// Format as a list of the contained memory areas.
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list()
.entries(self.areas.iter())
.finish()
}
}

@ -0,0 +1,34 @@
use super::*;
/// Memory handler that eagerly backs every page with a frame taken from the
/// given allocator at map time.
#[derive(Debug, Clone)]
pub struct ByFrame<T: FrameAllocator> {
/// Attributes applied to each mapped entry.
flags: MemoryAttr,
/// Source (and sink) of the backing frames.
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
/// Allocate a frame and map `addr` to it. Panics on allocation failure.
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = self.allocator.alloc().expect("failed to allocate frame");
self.flags.apply(pt.map(addr, target));
}
/// Return the frame to the allocator and unmap `addr`.
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = pt.get_entry(addr).expect("fail to get entry").target();
self.allocator.dealloc(target);
pt.unmap(addr);
}
/// Frames are mapped eagerly, so a page fault is never ours to handle.
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl<T: FrameAllocator> ByFrame<T> {
/// Create a handler with the given entry attributes and frame allocator.
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
ByFrame { flags, allocator }
}
}

@ -0,0 +1,45 @@
use super::*;
/// Memory handler that defers frame allocation to the first page fault
/// (demand paging).
#[derive(Debug, Clone)]
pub struct Delay<T: FrameAllocator> {
/// Attributes applied to an entry once its frame is allocated on fault.
flags: MemoryAttr,
/// Source (and sink) of the lazily-allocated frames.
allocator: T,
}
impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
/// Install a not-present placeholder entry (target 0); the real frame is
/// allocated later in `page_fault_handler`.
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.map(addr, 0);
entry.set_present(false);
entry.update();
}
/// Free the frame and unmap `addr` if it was ever faulted in.
// NOTE(review): when the entry is still not-present, the placeholder entry
// installed by `map` is left in the table — confirm that is intended.
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
self.allocator.dealloc(entry.target());
pt.unmap(addr);
}
}
/// Handle a fault on a delayed page: allocate a frame, point the entry at
/// it and apply the attributes. Returns `false` if the entry is already
/// present (not a delayed-allocation fault).
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool {
let entry = pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// not a delay case
return false;
}
let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame);
self.flags.apply(entry);
true
}
}
impl<T: FrameAllocator> Delay<T> {
/// Create a handler with the given entry attributes and frame allocator.
pub fn new(flags: MemoryAttr, allocator: T) -> Self {
Delay { flags, allocator }
}
}

@ -0,0 +1,32 @@
use super::*;
/// Memory handler for linearly-mapped areas: physical = virtual + offset.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct Linear {
/// Signed offset added to a virtual address to obtain the physical one.
offset: isize,
/// Attributes applied to each mapped entry.
flags: MemoryAttr,
}
impl MemoryHandler for Linear {
fn box_clone(&self) -> Box<MemoryHandler> {
Box::new(self.clone())
}
/// Map `addr` to `addr + offset` and apply the attributes.
fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
let target = (addr as isize + self.offset) as PhysAddr;
self.flags.apply(pt.map(addr, target));
}
/// Unmap `addr`; no frame is owned, so nothing is deallocated.
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
pt.unmap(addr);
}
/// Linear areas are always mapped, so a fault here is never ours.
fn page_fault_handler(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {
false
}
}
impl Linear {
/// Create a handler with the given virtual-to-physical offset and entry
/// attributes.
pub fn new(offset: isize, flags: MemoryAttr) -> Self {
Linear { offset, flags }
}
}

@ -0,0 +1,29 @@
use super::*;
// NOTE: this trait may be an interesting extension point for lab exercises.
/// Strategy for mapping/unmapping the pages of a memory area and for
/// handling its page faults.
pub trait MemoryHandler: Debug + 'static {
/// Clone this handler into a new box (manual object-safe `Clone`).
fn box_clone(&self) -> Box<MemoryHandler>;
/// Establish the mapping for the page at `addr` in `pt`.
fn map(&self, pt: &mut PageTable, addr: VirtAddr);
/// Remove the mapping for the page at `addr` from `pt`.
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
/// Try to handle a page fault at `addr`; return `true` if handled.
fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
}
/// Forward `Clone` for boxed handlers to the object-safe `box_clone`.
impl Clone for Box<MemoryHandler> {
fn clone(&self) -> Box<MemoryHandler> {
self.box_clone()
}
}
/// A source of physical frames for memory handlers.
pub trait FrameAllocator: Debug + Clone + 'static {
/// Allocate a frame, returning its physical start address if available.
fn alloc(&self) -> Option<PhysAddr>;
/// Return the frame beginning at `target` to the allocator.
fn dealloc(&self, target: PhysAddr);
}
mod linear;
mod byframe;
mod delay;
//mod swap;
pub use self::linear::Linear;
pub use self::byframe::ByFrame;
pub use self::delay::Delay;

@ -0,0 +1,270 @@
//! memory set, area
//! and the inactive page table
use alloc::{vec::Vec, boxed::Box};
use core::fmt::{Debug, Error, Formatter};
use super::*;
use crate::paging::*;
use self::handler::MemoryHandler;
pub mod handler;
/// A continuous virtual memory space with the same attributes,
/// like `vma_struct` in ucore.
#[derive(Debug, Clone)]
pub struct MemoryArea {
/// Virtual address of the first byte of the area (inclusive).
start_addr: VirtAddr,
/// Virtual address one past the last byte of the area (exclusive).
end_addr: VirtAddr,
/// Strategy that maps/unmaps pages and handles faults for this area.
handler: Box<MemoryHandler>,
/// Human-readable name, used for debugging.
name: &'static str,
}
impl MemoryArea {
/// View the area's content as a byte slice.
///
/// # Safety
/// The area must be mapped and readable in the current address space.
pub unsafe fn as_slice(&self) -> &[u8] {
::core::slice::from_raw_parts(self.start_addr as *const u8, self.end_addr - self.start_addr)
}
/// View the area's content as a mutable byte slice.
///
/// # Safety
/// The area must be mapped and writable in the current address space.
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
::core::slice::from_raw_parts_mut(self.start_addr as *mut u8, self.end_addr - self.start_addr)
}
/// Test whether the virtual address `addr` lies inside this area.
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
/// Test whether this area overlaps `other`, at page granularity.
fn is_overlap_with(&self, other: &MemoryArea) -> bool {
// Compare half-open page ranges [p0, p1) and [p2, p3).
let p0 = Page::of_addr(self.start_addr);
let p1 = Page::of_addr(self.end_addr - 1) + 1;
let p2 = Page::of_addr(other.start_addr);
let p3 = Page::of_addr(other.end_addr - 1) + 1;
!(p1 <= p2 || p0 >= p3)
}
/// Map every page of the area into `pt` via the handler.
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.map(pt, page.start_address());
}
}
/// Unmap every page of the area from `pt` via the handler.
fn unmap(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
self.handler.unmap(pt, page.start_address());
}
}
}
/// The attributes of the memory, applied to page table entries when a page
/// is mapped. All flags default to `false`.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Default)]
pub struct MemoryAttr {
/// Accessible from user mode.
user: bool,
/// Not writable.
readonly: bool,
/// Executable.
execute: bool,
/// Memory-mapped I/O region.
mmio: bool,
}
impl MemoryAttr {
/// Builder: mark the memory as user-accessible.
pub fn user(mut self) -> Self {
self.user = true;
self
}
/// Builder: mark the memory as read-only.
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/// Builder: mark the memory as executable.
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/// Builder: mark the memory as memory-mapped I/O.
pub fn mmio(mut self) -> Self {
self.mmio = true;
self
}
/// Apply the attributes to a page table entry, marking it present and
/// unconditionally flushing via `update()` (unlike the legacy version,
/// all flags are written every time).
pub fn apply(&self, entry: &mut Entry) {
entry.set_present(true);
entry.set_user(self.user);
entry.set_writable(!self.readonly);
entry.set_execute(self.execute);
entry.set_mmio(self.mmio);
entry.update();
}
}
/// A set of memory areas with an associated page table,
/// like `mm_struct` in ucore.
pub struct MemorySet<T: InactivePageTable> {
/// Non-overlapping memory areas belonging to this address space.
areas: Vec<MemoryArea>,
/// The (inactive) page table holding the mappings of all areas.
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/// Create an empty memory set with a page table that has kernel memory
/// mapped (via `T::new`).
pub fn new() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new(),
}
}
/// Create an empty memory set with a bare page table (no kernel memory).
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new_bare(),
}
}
/// Find the memory area containing the virtual address `addr`, if any.
pub fn find_area(&self, addr: VirtAddr) -> Option<&MemoryArea> {
self.areas.iter().find(|area| area.contains(addr))
}
/// Build a memory area `[start_addr, end_addr)` driven by `handler`, map
/// it, and add it to the set.
///
/// Panics on an invalid range or if the area overlaps an existing one.
pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, handler: impl MemoryHandler, name: &'static str) {
assert!(start_addr <= end_addr, "invalid memory area");
let area = MemoryArea { start_addr, end_addr, handler: Box::new(handler), name };
assert!(self.areas.iter()
.find(|other| area.is_overlap_with(other))
.is_none(), "memory area overlap");
self.page_table.edit(|pt| area.map(pt));
self.areas.push(area);
}
/// Iterate over the memory areas of this set.
pub fn iter(&self) -> impl Iterator<Item=&MemoryArea> {
self.areas.iter()
}
/// Edit the underlying page table directly through `f`.
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/// Execute `f` with this set's page table activated.
///
/// # Safety
/// See `InactivePageTable::with`.
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/// Activate this set's page table.
///
/// # Safety
/// See `InactivePageTable::activate`.
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/// Get the token of the associated page table.
pub fn token(&self) -> usize {
self.page_table.token()
}
/// Unmap all areas and remove them from the set.
pub fn clear(&mut self) {
// Destructure to borrow the page table and the areas simultaneously.
let Self { ref mut page_table, ref mut areas, .. } = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap(pt);
}
});
areas.clear();
}
/// Get a mutable reference to the inactive page table.
pub fn get_page_table_mut(&mut self) -> &mut T{
&mut self.page_table
}
/// Dispatch a page fault at `addr` to the handler of the containing area.
/// Returns `false` when no area contains `addr`.
// NOTE(review): relies on `edit` returning the closure's result here —
// confirm the `InactivePageTable::edit` signature was updated accordingly.
pub fn page_fault_handler(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr));
match area {
Some(area) => self.page_table.edit(|pt| area.handler.page_fault_handler(pt, addr)),
None => false,
}
}
}
impl<T: InactivePageTable> Clone for MemorySet<T> {
/// Clone the memory set: build a fresh kernel-mapped page table and re-map
/// every area into it via its handler.
// NOTE(review): frame-backed handlers map *new* frames; the frame contents
// are not copied here — confirm callers copy data separately.
fn clone(&self) -> Self {
let mut page_table = T::new();
page_table.edit(|pt| {
for area in self.areas.iter() {
area.map(pt);
}
});
MemorySet {
areas: self.areas.clone(),
page_table,
}
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
/// Unmap all areas (freeing handler-owned frames) when the set is dropped.
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
/// Format as a list of the contained memory areas.
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list()
.entries(self.areas.iter())
.finish()
}
}

@ -0,0 +1,15 @@
//! Helper functions
use super::*;
/// Helper extension methods available on any `PageTable`.
pub trait PageTableExt: PageTable {
    /// Fixed virtual address used as a scratch slot for temporary mappings.
    // NOTE(review): assumes nothing else maps this address while a temporary
    // map is in flight — confirm against the kernel memory layout.
    const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;

    /// Temporarily map the frame at physical address `target` to
    /// `TEMP_PAGE_ADDR`, expose its contents to `f` as a `&mut D`, then
    /// unmap it again and return `f`'s result.
    ///
    /// The caller must ensure `D` fits within one page and that the frame
    /// actually holds a valid `D`.
    fn with_temporary_map<T, D>(&mut self, target: PhysAddr, f: impl FnOnce(&mut Self, &mut D) -> T) -> T {
        self.map(Self::TEMP_PAGE_ADDR, target);
        // Fix: derive the mutable reference from `as_mut_ptr()` rather than
        // casting the const pointer from `as_ptr()` to `*mut D`.
        let data = unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_mut_ptr() as *mut D) };
        let ret = f(self, data);
        self.unmap(Self::TEMP_PAGE_ADDR);
        ret
    }
}

@ -57,14 +57,16 @@ impl Entry for MockEntry {
fn set_user(&mut self, value: bool) { unimplemented!() }
fn execute(&self) -> bool { unimplemented!() }
fn set_execute(&mut self, value: bool) { unimplemented!() }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
type PageFaultHandler = Box<FnMut(&mut MockPageTable, VirtAddr)>;
impl PageTable for MockPageTable {
type Entry = MockEntry;
// type Entry = MockEntry;
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry {
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry {
let entry = &mut self.entries[addr / PAGE_SIZE];
assert!(!entry.present);
entry.present = true;
@ -77,7 +79,7 @@ impl PageTable for MockPageTable {
assert!(entry.present);
entry.present = false;
}
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry> {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry> {
Some(&mut self.entries[addr / PAGE_SIZE])
}
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8] {

@ -3,198 +3,149 @@
//! Implemented for every architecture, used by OS.
use super::*;
use super::memory_set::InactivePageTable;
use log::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
pub use self::ext::*;
#[cfg(test)]
mod mock_page_table;
mod ext;
// trait for PageTable
pub trait PageTable {
type Entry: Entry;
/*
** @brief map a virual address to the target physics address
** @param addr: VirtAddr the virual address to map
** @param target: VirtAddr the target physics address
** @retval Entry the page table entry of the mapped virual address
*/
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Self::Entry;
/*
** @brief unmap a virual address from physics address
** @param addr: VirtAddr the virual address to unmap
** @retval none
*/
// type Entry: Entry;
/// Map a page of virual address `addr` to the frame of physics address `target`
/// Return the page table entry of the mapped virual address
fn map(&mut self, addr: VirtAddr, target: PhysAddr) -> &mut Entry;
/// Unmap a page of virual address `addr`
fn unmap(&mut self, addr: VirtAddr);
/*
** @brief get the page table entry of a virual address
** @param addr: VirtAddr the virual address
** @retval Entry the page table entry of the virual address
*/
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Self::Entry>;
// For testing with mock
/*
** @brief used for testing with mock
** get a mutable reference of the content of a page from a virtual address
** @param addr: VirtAddr the virual address of the page
** @retval &'b mut [u8] mutable reference of the content of a page as array of bytes
*/
fn get_page_slice_mut<'a,'b>(&'a mut self, addr: VirtAddr) -> &'b mut [u8];
/*
** @brief used for testing with mock
** read data from a virtual address
** @param addr: VirtAddr the virual address of data to read
** @retval u8 the data read
*/
fn read(&mut self, addr: VirtAddr) -> u8;
/*
** @brief used for testing with mock
** write data to a virtual address
** @param addr: VirtAddr the virual address of data to write
** @param data: u8 the data to write
** @retval none
*/
fn write(&mut self, addr: VirtAddr, data: u8);
/// Get the page table entry of a page of virual address `addr`
/// If its page do not exist, return `None`
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
/// Get a mutable reference of the content of a page of virtual address `addr`
/// Used for testing with mock
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/// Read data from virtual address `addr`
/// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() }
}
/// Write data to virtual address `addr`
/// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) {
unsafe { (addr as *mut u8).write(data) }
}
}
/// Page Table Entry
///
/// Abstraction over a single leaf page-table entry, implemented per
/// architecture. Besides the hardware-defined bits (present, writable,
/// accessed, dirty, user, execute), it exposes software-defined bits used by
/// the copy-on-write and swap extensions.
pub trait Entry {
    /// Make all changes take effect.
    ///
    /// IMPORTANT!
    /// This must be called after any change to ensure it become effective.
    /// Usually it will cause a TLB/MMU flush.
    fn update(&mut self);
    /// A bit set by hardware when the page is accessed
    fn accessed(&self) -> bool;
    /// A bit set by hardware when the page is written
    fn dirty(&self) -> bool;
    /// Will PageFault when try to write page where writable=0
    fn writable(&self) -> bool;
    /// Will PageFault when try to access page where present=0
    fn present(&self) -> bool;
    /// Clear the accessed bit
    fn clear_accessed(&mut self);
    /// Clear the dirty bit
    fn clear_dirty(&mut self);
    /// Set the writable bit
    fn set_writable(&mut self, value: bool);
    /// Set the present bit
    fn set_present(&mut self, value: bool);
    /// The target physical address in the entry
    /// Can be used for other purpose if present=0
    fn target(&self) -> PhysAddr;
    /// Set the target physical address in the entry
    fn set_target(&mut self, target: PhysAddr);

    // For Copy-on-write
    /// Whether the page is marked writable-and-shared
    fn writable_shared(&self) -> bool;
    /// Whether the page is marked readonly-and-shared
    fn readonly_shared(&self) -> bool;
    /// Mark the page as shared: writable-shared if `writable` is true,
    /// readonly-shared otherwise
    fn set_shared(&mut self, writable: bool);
    /// Mark the page as not shared
    fn clear_shared(&mut self);

    // For Swap
    /// Whether the page has been swapped out
    fn swapped(&self) -> bool;
    /// Set the swapped bit
    fn set_swapped(&mut self, value: bool);

    /// Whether the page is accessible from user mode
    fn user(&self) -> bool;
    /// Set the user-accessible bit
    fn set_user(&mut self, value: bool);
    /// Whether the page is executable
    fn execute(&self) -> bool;
    /// Set the executable bit
    fn set_execute(&mut self, value: bool);
    /// Whether the page maps memory-mapped I/O
    fn mmio(&self) -> bool;
    /// Set the MMIO bit
    fn set_mmio(&mut self, value: bool);
}
/// An inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
    /// the active version of page table
    type Active: PageTable;

    /// Create a new page table with kernel memory mapped
    fn new() -> Self {
        let mut pt = Self::new_bare();
        pt.map_kernel();
        pt
    }

    /// Create a new page table without kernel memory mapped
    fn new_bare() -> Self;

    /// Map kernel segments
    fn map_kernel(&mut self);

    /// The root page-table token of this table:
    /// CR3 on x86, SATP on RISCV, TTBR on AArch64
    fn token(&self) -> usize;

    /// Install `token` as the active root page-table token.
    ///
    /// # Safety
    /// Switching the address space invalidates raw pointers into the old
    /// mapping; the caller must ensure `token` refers to a valid table.
    unsafe fn set_token(token: usize);

    /// The token of the currently active page table
    fn active_token() -> usize;

    /// Flush the whole TLB so a token switch takes effect
    fn flush_tlb();

    /// Make this page table editable
    /// Set the recursive entry of current active page table to this
    fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;

    /// Activate this page table
    /// (no-op if it is already active, to avoid a needless TLB flush)
    unsafe fn activate(&self) {
        let old_token = Self::active_token();
        let new_token = self.token();
        debug!("switch table {:x?} -> {:x?}", old_token, new_token);
        if old_token != new_token {
            Self::set_token(new_token);
            Self::flush_tlb();
        }
    }

    /// Execute function `f` with this page table activated,
    /// then restore the previously active table (flushing the TLB on each
    /// switch; both switches are skipped when the tokens already match)
    unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
        let old_token = Self::active_token();
        let new_token = self.token();
        debug!("switch table {:x?} -> {:x?}", old_token, new_token);
        if old_token != new_token {
            Self::set_token(new_token);
            Self::flush_tlb();
        }
        let ret = f();
        debug!("switch table {:x?} -> {:x?}", new_token, old_token);
        if old_token != new_token {
            Self::set_token(old_token);
            Self::flush_tlb();
        }
        ret
    }
}

@ -8,7 +8,6 @@
use super::*;
use super::paging::*;
use super::memory_set::InactivePageTable;
use super::addr::Frame;
use core::ops::{Deref, DerefMut};

@ -6,7 +6,6 @@
#![feature(vec_resize_default)]
#![feature(asm)]
#![feature(exact_size_is_empty)]
#![feature(extern_crate_item_prelude)]
extern crate alloc;

Binary file not shown.

After

Width:  |  Height:  |  Size: 316 KiB

@ -0,0 +1,616 @@
<!-- page_number: true -->
<!-- $width:12in -->
<!-- $height: 6.75in -->
# Rust版 uCore OS 的设计与实现
## Design and implementation of uCore OS in Rust
王润基
清华大学计算机系
2018.12.16 @ OS2ATC
---
# 提纲
## 简介Rust uCore OS是什么
## 动机为什么要用Rust写OS
## 体会用Rust写OS有何好处
## 未来:接下来会做什么?
---
# 简介Rust uCore OS是什么
---
# uCore OS
清华大学教学操作系统
参考 xv6 & jos in MIT, OS161 in Harvard, Linux
用C语言编写的宏内核OS
* [ucore_os_lab](https://github.com/chyyuu/ucore_os_lab):操作系统课实验
* [ucore_os_plus](https://github.com/chyyuu/ucore_os_plus):教学科研系统
---
# uCore OS in Rust -- RustOS
2018年操作系统课大实验项目
“用Rust语言重新实现uCore”
#
之后在OS专题训练课上推广目前
支持三大平台x86_64, RISCV32, AArch64
支持硬件计算所Labeled-RISCV树莓派3B
支持多核CPU
---
## 大实验选题列举
OS
* RustOS for x86_64 SMP
* Rustable - ucore 在 arm 平台的 rust 移植
* Rucore with LKM Drivers
OS专题训练
* RustOS 上树莓派 USB 与 VideoCore IV 显卡驱动的移植
* RustOS 多核移植与基于PARD框架的线程级Label管理
* RustOS wiki完善与教学lab实验的制作
* RustOS 参考sv6的多核实现和优化
* RustOS 移植到 rv64 及llvm编译器支持
---
# 动机为什么要用Rust写OS
* C语言有何不足
* Rust解决了哪些痛点
* 条件是否成熟?
---
# C语言有何不足
---
## 简单?简陋?
C语言简单、直接为OS而生。
但从现代编程语言的角度看C语言有些简陋难以表达复杂逻辑和抽象。
![90%](./C.jpg)
*上图出自[一篇知乎回答](https://www.zhihu.com/question/25038841/answer/44396770)*
---
### 缺乏对OOP和接口的语言级支持
C中常用函数指针实现接口
```c
struct device {
size_t d_blocks;
size_t d_blocksize;
int (*d_open)(struct device *dev, uint32_t open_flags);
int (*d_close)(struct device *dev);
int (*d_io)(struct device *dev, struct iobuf *iob, bool write);
int (*d_ioctl)(struct device *dev, int op, void *data);
};
```
---
### 缺乏基础数据结构支持
OS中常用的侵入式链表摘自ucore_os_lab
```c
// 链表节点
struct list_entry {
struct list_entry *prev, *next;
};
// 在宿主类型中嵌入链表节点
struct Page {
list_entry_t page_link;
...
};
// 从list类型转化回宿主类型
#define le2page(le, member) \
    to_struct((le), struct Page, member)
#define offsetof(type, member) \
    ((size_t)(&((type *)0)->member))
#define to_struct(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
```
---
### 缺乏工程模块系统
* 编译配置复杂
* 难以复用代码
---
## SegmentFault
悬空指针,重复释放,数据竞争……
#
![60%](./pointer.png)
---
# Rust解决了哪些痛点
* 强类型内存安全线程安全——减少Bug
* 现代语言特性——提升开发体验
* 完善的模块系统——方便代码复用
* 零开销抽象——能写OS的根本保障
---
# 是时候尝试Rust了
### 社区:[Redox](https://www.redox-os.org)
全功能Rust OS微内核架构支持GUI
### 教学:[CS140e](https://web.stanford.edu/class/cs140e/)
斯坦福大学实验性OS课程2018年新开设
Rust编写OS面向ARM在树莓派3上运行
### 兴趣:[Writing an OS in Rust](https://os.phil-opp.com)
手把手带你用Rust编写OS的系列博客
面向x86_64教程极为详尽
作者为Rust编写OS提供了大量开源工具
---
## 万事俱备,只是……
# 还不会Rust怎么办
编写OS是学习Rust的高效途径
---
# 体会用Rust写OS有何好处
* 内存与线程安全减少Bug
* 包管理系统:复用已有代码
* 接口与泛型:内核模块化
* 所有权和RAII机制简化资源管理
---
# 安全!
类型系统 + 所有权机制 + 生命周期机制
=> 内存安全 + 线程安全
---
## Rust如何减少Bug
### 消除了越界访问
=> panic
### 消除了因生命周期导致的非法访存
=> 编译错误
### 消除了数据竞争
=> 死锁
### 缩小了Bug的查找范围
=> unsafe块
---
## Rust如何减少Bug
* 大部分低级错误在编译期避免
* 少数逻辑错误在运行时暴露
* 难以发现的错误被限制了范围
---
## 充分利用现成轮子
引用的外部库crate
* `alloc`: 容器
* `log`: 日志
* `spin`: 自旋锁
* `xmas-elf`: 解析ELF文件
* `linked_list_allocator`: 堆分配算法
* `uart_16550`: 串口驱动
* `x86_64`: 包装汇编指令,封装页表等数据结构
更好地专注于OS核心逻辑
---
## 制造我们自己的轮子
仿照`x86_64`库,并基于社区现有成果,
我们分别实现了`riscv`和`aarch64`库。
---
# 内核模块化
ucore_os_lab = 内存管理 + 进程管理 + 文件系统
lab1-lab8层层依赖高度耦合。
然而它们在逻辑上互不相关,理应分开。
---
# 内核模块化
每个部分作为独立的crate存在互不依赖。
内核通过实现它们的接口,把它们粘合在一起。
可以分别内部单元测试,然后放在一起集成测试。
配合泛型,可做到零开销。
---
### 内存模块
接口:页表,页表项,缺页处理函数
功能:面向进程的虚存管理(`mm_struct`
-------支持内存置换、写时复制、延迟分配等机制
### 线程模块
接口:上下文切换,新线程的构造
功能:线程调度和管理
### 文件系统
接口块设备VFS虚拟文件系统
功能:文件操作和管理
---
### 内存模块——接口
```rust
pub trait PageTable {
fn map(&mut self, addr: VirtAddr, target: PhysAddr)
-> &mut Entry;
fn unmap(&mut self, addr: VirtAddr);
fn get_entry(&mut self, addr: VirtAddr)
-> Option<&mut Entry>;
}
pub trait Entry {
fn update(&mut self); // flush TLB
fn present(&self) -> bool;
fn target(&self) -> PhysAddr;
fn set_present(&mut self, value: bool);
fn set_target(&mut self, target: PhysAddr);
...
}
```
---
### 内存模块——面向接口的上层实现
```rust
pub struct MemoryArea {
start_addr: VirtAddr,
end_addr: VirtAddr,
...
}
impl MemoryArea {
fn map(&self, pt: &mut PageTable) {
for page in Page::range_of(self.start_addr, self.end_addr) {
let target = alloc_frame();
pt.map(addr, target);
}
}
}
```
为一段连续的虚拟地址空间 映射页表项。
---
### 内存模块——接口的Mock实现
```rust
pub struct MockPageTable {
entries: [MockEntry; PAGE_COUNT],
data: [u8; PAGE_SIZE * PAGE_COUNT],
page_fault_handler: Option<PageFaultHandler>,
}
impl PageTable for MockPageTable {
fn map(...) {...}
fn unmap(...) {...}
fn get_entry(...) {...}
}
impl MockPageTable {
fn read(&mut self, addr: VirtAddr) -> u8 {...}
fn write(&mut self, addr: VirtAddr, data: u8) {...}
}
```
实现一个仿真页表,模拟地址转换的过程,从数组中存取数据。
---
### 内存模块——基于Mock的单元测试
```rust
#[test]
fn memory_area_map() {
let mut pt = MockPageTable {...};
let area = MemoryArea {...};
area.map(&mut pt);
pt.write(0x1000, 1);
assert_eq!(pt.read(0x1000), 1);
}
```
可用`cargo test`在任意环境下运行单元测试不依赖QEMU。
---
### 线程模块——接口与实现
```rust
pub trait Context {
unsafe extern "C"
fn switch_to(&mut self, target: &mut Context);
}
pub struct X86Context {
rip: usize,
... // callee-saved registers
}
impl Context for X86Context {
unsafe extern "C" // Store caller-saved registers
fn switch_to(&mut self, target: &mut Context) {
// Store callee-saved registers
// Restore callee-saved registers
} // Restore caller-saved registers
}
```
上下文切换:保存和恢复寄存器
---
### 线程模块——面向接口的上层实现
```rust
/// 管理所有线程的状态及调度,全局唯一
pub struct ProcessManager {...}
/// 线程执行者每个CPU核对应一个
pub struct Processor {
manager: Arc<ProcessManager>,
context: Box<Context>,
...
}
impl Processor {
/// 调度线程,无限循环
fn run(&mut self) -> ! { loop {
let mut process = self.manager.pop();
unsafe { self.context.switch_to(&mut process); }
self.manager.push(process);
}}
}
```
每个CPU核不断地从运行队列中取出线程-运行-放回
---
### 线程模块——兼容标准库的高层封装
```rust
// thread.rs
pub fn current() -> Thread {...}
pub fn sleep(dur: Duration) {...}
pub fn spawn<F, T>(f: F) -> JoinHandle<T> {...}
pub fn yield_now() {...}
pub fn park() {...}
```
在上页基础上进一步封装。
提供和标准库`std::thread`完全一致的上层接口。
使得依赖std的多线程代码可以方便地迁移到内核中。
---
# 所有权和RAII机制简化资源管理
OS需要管理复杂的资源
资源之间有复杂的共享和依赖关系
---
![80%](./resources.png)
---
进程的对象结构:
```rust
pub struct Process {
context: Context,
kstack: KernelStack,
memory: MemorySet,
files: BTreeMap<usize, Arc<Mutex<File>>>,
...
}
```
---
将资源封装成对象,在析构函数中释放资源。
```rust
pub struct KernelStack {
ptr: *mut u8,
layout: Layout,
}
impl Drop for KernelStack {
fn drop(&mut self) {
unsafe{ dealloc(self.ptr, self.layout); }
}
}
```
---
当对象的生命周期结束时,资源自动释放。
```rust
pub struct Process {
context: Context,
kstack: KernelStack,
memory: MemorySet,
files: BTreeMap<usize, Arc<Mutex<File>>>,
...
}
pub struct ProcessManager {
procs: BTreeMap<usize, Process>,
}
impl ProcessManager {
pub fn remove(&mut self, pid: usize) {
self.procs.remove(&pid);
// All resources have been released here
}
}
```
---
# Rust vs C
## 代码风格
||Rust|C|
|-|-|-|
|数据结构|泛型容器Vec|侵入式(链表)|
|全局变量|少量|大量|
|数据分布|倾向分散|倾向集中|
|数据类型|鼓励自定义类型|基础类型|
|思维方式|所有权+生命周期|数据+行为|
---
## 代码量
||Rust|C|
|-|-|-|
|内存管理|1600|1800|
|线程管理|1200|1200|
|文件系统|1300|3400|
|同步互斥|500|400|
|内核其它|800|1200|
|共计|5400|8000|
*使用`loc`统计代码行数基于RISCV版本粗略计算*
---
## 语言能力
在底层:
* 具有同等的底层操作能力
* 二者具有良好的互操作性
在上层:
* Rust能编写更加安全的代码减少Bug
* Rust具有更好的表达能力胜任复杂逻辑
* Rust具有更强的抽象能力有助于代码的分离和复用
——Rust更加适合编写OS
---
# Rust的问题
学习曲线过于陡峭!
所有权、生命周期等机制难以驾驭!
初学者大部分时间在与编译器作斗争。
一种可能的解决方案:
* 先用unsafe、C风格实现
* 再逐步消除unsafe、重构成Rust风格
---
# 未来:接下来会做什么?
* 真机测试HiFiveU, K210等RISCV64开发板
* 教学实验2019年操作系统课实验
* 功能完善实现POSIX接口
* 性能优化发掘Rust的潜力
* 对比借鉴其它有趣OS
* 潜力探索async异步机制
---
## 其它有趣OS
### [Tock](https://www.tockos.org)
Rust编写的嵌入式操作系统
关注Capsule内核模块设计进程的内存分配策略……
### [Biscuit](https://github.com/mit-pdos/biscuit)
Golang编写的POSIX兼容OSMIT出品
关注Go异步机制Go vs Rust
---
## 潜力探索async无栈协程应用于OS内核的探讨
async-await用户态异步编程机制
用同步的方式,编写异步的代码。
背后的实现机制和OS线程调度高度一致。
能否应用于Kernel中与传统线程机制相比有何优缺点
---
# 感谢
### 指导老师
陈渝,向勇
### 参与开发的同学们
王润基,戴臻旸,王纪霆
贾越凯,寇明阳,孔彦
刘辰屹,陈秋昊,朱书聪
---
# 欢迎试玩!
![40%](demo.png)
GitHubhttps://github.com/wangrunji0408/RustOS
---
# 感谢聆听
# Q&A

15
kernel/Cargo.lock generated

@ -1,12 +1,10 @@
[[package]]
name = "aarch64"
version = "2.2.2"
source = "git+https://github.com/equation314/aarch64#47bf5439f5a1379f0fef6272853cf684207a4e45"
source = "git+https://github.com/equation314/aarch64#e3b60adb233ad34d05443e0b5ec34cac29253296"
dependencies = [
"bare-metal 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"usize_conversions 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -50,7 +48,7 @@ version = "0.1.0"
name = "bcm2837"
version = "0.1.0"
dependencies = [
"cortex-a 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"aarch64 2.2.2 (git+https://github.com/equation314/aarch64)",
"volatile 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -98,14 +96,6 @@ name = "cfg-if"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cortex-a"
version = "2.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"register 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "fixedvec"
version = "0.2.3"
@ -455,7 +445,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bootloader 0.3.4 (git+https://github.com/wangrunji0408/bootloader)" = "<none>"
"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16"
"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
"checksum cortex-a 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2b187d0d728b4a99ba1d79f9671b976bcdd71a8a2c719585218fd2dc14a4d08c"
"checksum fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "7c6c16d316ccdac21a4dd648e314e76facbbaf316e83ca137d0857a9c07419d0"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"

@ -12,7 +12,8 @@ no_mmu = []
# Kernel in M-mode (for riscv32)
m_mode = ["no_mmu"]
# (for aarch64 RaspberryPi3)
board_raspi3 = []
board_raspi3 = ["bcm2837"]
raspi3_use_generic_timer = ["bcm2837/use_generic_timer"]
[profile.dev]
# MUST >= 1 : Enable RVO to avoid stack overflow
@ -53,7 +54,7 @@ bbl = { path = "../crate/bbl" }
[target.'cfg(target_arch = "aarch64")'.dependencies]
aarch64 = { git = "https://github.com/equation314/aarch64" }
atags = { path = "../crate/atags" }
bcm2837 = { path = "../crate/bcm2837", features = ["use_generic_timer"] }
bcm2837 = { path = "../crate/bcm2837", optional = true }
[package.metadata.bootimage]
default-target = "x86_64-blog_os.json"

@ -32,14 +32,16 @@ target := $(arch)-blog_os
kernel := target/$(target)/$(mode)/ucore
bin := target/$(target)/$(mode)/kernel.bin
bootimage := target/$(target)/bootimage.bin
user_dir := ../user
user_bin_path := $(user_dir)/target/$(arch)-ucore/debug
user_bins := $(patsubst $(user_bin_path)/%.d, $(user_bin_path)/%, $(wildcard $(user_bin_path)/*.d))
user_obj := build/$(arch)/user.o
export ARCH = $(arch)
export SFSIMG = $(user_dir)/build/user-$(arch).img
export SMP = $(smp)
#export SFSIMG = $(user_dir)/build/user-$(arch).img
ifeq ($(arch), x86_64)
export SFSIMG = $(user_dir)/img/ucore-i386.img
else
export SFSIMG = $(user_dir)/img/ucore-$(arch).img
endif
### qemu options ###
qemu_opts := \
@ -79,6 +81,15 @@ features += no_bbl
endif
endif
ifeq ($(board), raspi3)
# qemu only has generic timer
# TODO: configure system/generic timer automatically
raspi3_timer ?= generic
ifeq ($(raspi3_timer), generic)
features += raspi3_use_generic_timer
endif
endif
ifdef m_mode
features += no_mmu m_mode
bbl_m_mode := --enable-boot-machine
@ -101,7 +112,7 @@ ifeq ($(uname), Darwin)
prefix := x86_64-elf-
endif
else ifeq ($(arch), riscv32)
prefix := riscv32-unknown-elf-
prefix := riscv64-unknown-elf-
else ifeq ($(arch), aarch64)
prefix ?= aarch64-none-elf-
endif
@ -162,7 +173,7 @@ else
$(bbl_m_mode) \
--with-arch=rv32imac \
--disable-fp-emulation \
--host=riscv32-unknown-elf \
--host=riscv64-unknown-elf \
--with-payload=$(abspath $(kernel)) && \
make -j32 && \
cp bbl ../../kernel/$@
@ -189,11 +200,6 @@ endif
sfsimg:
@cd $(user_dir) && make sfsimg
# make user.o from binary files
$(user_obj): $(user_bins)
@cd $(user_bin_path) && \
$(ld) -o $(abspath $@) $(patsubst %, -b binary %, $(notdir $(user_bins)))
### install ###
ifeq ($(board), raspi3)

@ -28,8 +28,7 @@ else
SFSIMG_CFLAGS="-march=rv64ia -mabi=lp64"
RISCV_PK_CONFIGURE_FLAGS="--with-arch=rv64imac --disable-fp-emulation --host=riscv64-unknown-elf"
fi
UCORE_USER_IMAGE="../user/hand/user-${TARGET_ARCH}.img"
# UCORE_USER_IMAGE="../user/build/user-${TARGET_ARCH}.img"
UCORE_USER_IMAGE="../user/img/ucore-${TARGET_ARCH}.img"
LLC=llc
RUST_SRC_PATH=$(rustc --print sysroot)/lib/rustlib/src/rust/src
CARGO_PATH=~/.cargo
@ -39,6 +38,7 @@ CC=${TARGET_ARCH}-unknown-elf-gcc
AR=${TARGET_ARCH}-unknown-elf-ar
OBJCOPY=${TARGET_ARCH}-unknown-elf-objcopy
QEMU=qemu-system-${TARGET_ARCH}
export SMP=4
@ -583,5 +583,5 @@ else
make -j32
cp bbl ../../kernel/outdir/kernel.bin
cd ../../kernel
${QEMU} -smp cores=1 -nographic -machine virt -kernel outdir/kernel.bin
${QEMU} -smp cores=${SMP} -nographic -machine virt -kernel outdir/kernel.bin
fi

@ -1,11 +1,28 @@
use crate::arch::interrupt::TrapFrame;
use bcm2837::timer::Timer;
use bcm2837::interrupt::{Controller, Interrupt};
use bcm2837::interrupt::Controller;
use log::*;
pub fn handle_irq(tf: &mut TrapFrame) {
let controller = Timer::new();
pub use bcm2837::interrupt::Interrupt;
static IRQ_HANDLERS: &'static [Option<fn()>; 64] = &[None; 64];
pub fn handle_irq(_tf: &mut TrapFrame) {
let controller = bcm2837::timer::Timer::new();
if controller.is_pending() {
super::timer::set_next();
crate::trap::timer();
}
for int in Controller::new().pending_interrupts() {
if let Some(handler) = IRQ_HANDLERS[int] {
handler();
}
}
}
pub fn register_irq(int: Interrupt, handler: fn()) {
unsafe {
*(&IRQ_HANDLERS[int as usize] as *const _ as *mut Option<fn()>) = Some(handler);
}
Controller::new().enable(int);
}

@ -9,10 +9,16 @@ pub mod serial;
pub const IO_REMAP_BASE: usize = bcm2837::IO_BASE;
pub const IO_REMAP_END: usize = 0x40001000;
pub fn init() {
/// Some initializations must be done before other initializations.
pub fn init_early() {
assert_has_not_been_called!("board::init must be called only once");
serial::SERIAL_PORT.lock().init();
println!("Hello Raspberry Pi!");
}
/// Initialize raspi3 drivers
pub fn init_driver() {
timer::init();
}

@ -1,11 +1,11 @@
use bcm2837::mini_uart::MiniUart;
use bcm2837::mini_uart::{MiniUart, MiniUartInterruptId};
use lazy_static::lazy_static;
use core::fmt;
use spin::Mutex;
use once::*;
/// Struct to get a global SerialPort interface
pub struct SerialPort {
mu: Option<MiniUart>,
mu: MiniUart,
}
pub trait SerialRead {
@ -14,30 +14,31 @@ pub trait SerialRead {
impl SerialPort {
/// Creates a new instance of `SerialPort`.
const fn new() -> SerialPort {
SerialPort { mu: None }
fn new() -> SerialPort {
SerialPort {
mu: MiniUart::new(),
}
}
/// Init a newly created SerialPort, can only be called once.
pub fn init(&mut self) {
assert_has_not_been_called!("SerialPort::init must be called only once");
self.mu = Some(MiniUart::new());
self.mu.init();
super::irq::register_irq(super::irq::Interrupt::Aux, handle_serial_irq);
}
/// Writes the byte `byte` to the UART device.
pub fn write_byte(&mut self, byte: u8) {
match &mut self.mu {
Some(mu) => mu.write_byte(byte),
None => panic!("SerialPort is not initialized"),
}
fn write_byte(&mut self, byte: u8) {
self.mu.write_byte(byte)
}
/// Reads a byte from the UART device, blocking until a byte is available.
pub fn read_byte(&mut self) -> u8 {
match &mut self.mu {
Some(mu) => return mu.read_byte(),
None => panic!("SerialPort is not initialized"),
fn read_byte(&self) -> u8 {
self.mu.read_byte()
}
// Whether the interrupt `id` is pending.
fn interrupt_is_pending(&self, id: MiniUartInterruptId) -> bool {
self.mu.interrupt_is_pending(id)
}
}
@ -70,4 +71,13 @@ impl fmt::Write for SerialPort {
}
}
pub static SERIAL_PORT: Mutex<SerialPort> = Mutex::new(SerialPort::new());
fn handle_serial_irq() {
let serial = SERIAL_PORT.lock();
if serial.interrupt_is_pending(MiniUartInterruptId::Recive) {
crate::trap::serial(serial.read_byte() as char)
}
}
lazy_static!{
pub static ref SERIAL_PORT: Mutex<SerialPort> = Mutex::new(SerialPort::new());
}

@ -1,5 +1,4 @@
use bcm2837::timer;
use bcm2837::interrupt::{Controller, Interrupt};
use log::*;
pub fn init() {

@ -0,0 +1,12 @@
/// ARM64 drivers
use once::*;
use super::board;
/// Initialize ARM64 common drivers
pub fn init() {
assert_has_not_been_called!();
board::init_driver();
}

@ -99,7 +99,7 @@ impl Context {
/// Pop all callee-saved registers, then return to the target.
#[naked]
#[inline(never)]
unsafe extern fn __switch(self_stack: &mut usize, target_stack: &mut usize) {
unsafe extern fn __switch(_self_stack: &mut usize, _target_stack: &mut usize) {
asm!(
"
mov x10, #-(12 * 8)
@ -149,7 +149,7 @@ impl Context {
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top, ttbr)
}
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, ttbr: usize) -> Self {
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, _is32: bool, ttbr: usize) -> Self {
InitStack {
context: ContextData::new(),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top),

@ -93,8 +93,9 @@ fn handle_syscall(num: u16, tf: &mut TrapFrame) {
}
fn handle_page_fault(tf: &mut TrapFrame) {
let addr = FAR_EL1.get();
let addr = FAR_EL1.get() as usize;
if !crate::memory::page_fault_handler(addr) {
error!("\nEXCEPTION: Page Fault @ {:#x}", addr);
crate::trap::error(tf);
}
}

@ -1,7 +1,7 @@
//! Serial driver for aarch64.
use core::fmt::{Arguments, Write};
use super::board::serial::{SerialRead, SERIAL_PORT};
use super::board::serial::*;
pub fn getchar() -> char {
unsafe { SERIAL_PORT.force_unlock(); }

@ -5,7 +5,7 @@ use ucore_memory::PAGE_SIZE;
use atags::atags::Atags;
use aarch64::{barrier, regs::*, addr::*};
use aarch64::paging::{PhysFrame as Frame, memory_attribute::*};
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet};
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Linear};
/// Memory initialization.
pub fn init() {
@ -99,15 +99,15 @@ fn init_frame_allocator() {
/// remap kernel page table after all initialization.
fn remap_the_kernel() {
let mut ms = unsafe { MemorySet::new_bare() };
ms.push(MemoryArea::new_identity(0, bootstacktop as usize, MemoryAttr::default(), "kstack"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
let mut ms = MemorySet::new_bare();
ms.push(0, bootstacktop as usize, Linear::new(0, MemoryAttr::default()), "kstack");
ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
use super::board::{IO_REMAP_BASE, IO_REMAP_END};
ms.push(MemoryArea::new_identity(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(), "io_remap"));
ms.push(IO_REMAP_BASE, IO_REMAP_END, Linear::new(0, MemoryAttr::default().mmio()), "io_remap");
unsafe { ms.get_page_table_mut().activate_as_kernel(); }
::core::mem::forget(ms);

@ -6,29 +6,25 @@ pub mod memory;
pub mod interrupt;
pub mod consts;
pub mod cpu;
pub mod driver;
#[cfg(feature = "board_raspi3")]
#[path = "board/raspi3/mod.rs"]
pub mod board;
pub use self::board::timer;
global_asm!(include_str!("boot/boot.S"));
/// The entry point of kernel
#[no_mangle] // don't mangle the name of this function
pub extern "C" fn rust_main() -> ! {
// Enable mmu and paging
memory::init_mmu_early();
// Init board to enable serial port.
board::init();
memory::init_mmu_early(); // Enable mmu and paging
board::init_early();
println!("{}", LOGO);
crate::logging::init();
interrupt::init();
memory::init();
timer::init();
driver::init();
crate::process::init();

@ -49,9 +49,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::default();
let attr = MairNormal::attr_value();
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, attr, &mut FrameAllocatorForAarch64)
@ -60,28 +58,19 @@ impl PageTable for ActivePageTable {
}
fn unmap(&mut self, addr: usize) {
let (frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
let (_frame, flush) = self.0.unmap(Page::of_addr(addr)).unwrap();
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
let entry_addr = ((addr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39);
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
// get p1 entry
let entry_addr = ((vaddr >> 9) & 0o777_777_777_7770) | (RECURSIVE_INDEX << 39)
| (vaddr & 0xffff_0000_0000_0000);
Some(unsafe { &mut *(entry_addr as *mut PageEntry) })
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
const ROOT_PAGE_TABLE: *mut Aarch64PageTable =
((RECURSIVE_INDEX << 39) | (RECURSIVE_INDEX << 30) | (RECURSIVE_INDEX << 21) | (RECURSIVE_INDEX << 12)) as *mut Aarch64PageTable;
@ -90,18 +79,6 @@ impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(ROOT_PAGE_TABLE as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut Aarch64PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
impl Entry for PageEntry {
@ -196,9 +173,9 @@ impl InactivePageTable for InactivePageTable0 {
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut Aarch64PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut Aarch64PageTable| {
table.zero();
// set up recursive mapping for the table
table[RECURSIVE_INDEX].set_frame(frame.clone(), EF::default(), MairNormal::attr_value());
@ -206,75 +183,56 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&ttbr_el1_read(0), |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default(), MairNormal::attr_value());
tlb_invalidate_all();
// execute f in the new context
f(active_table);
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
tlb_invalidate_all();
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
unsafe fn activate(&self) {
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
unsafe fn set_token(token: usize) {
ttbr_el1_write(1, Frame::containing_address(PhysAddr::new(token as u64)));
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
// Just need to switch the user TTBR
let old_frame = ttbr_el1_read(1);
let new_frame = self.p4_frame.clone();
debug!("switch TTBR1 {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
ttbr_el1_write(1, new_frame);
tlb_invalidate_all();
fn active_token() -> usize {
ttbr_el1_read(1).start_address().as_u64() as usize
}
let ret = f();
debug!("switch TTBR1 {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
ttbr_el1_write(1, old_frame);
fn flush_tlb() {
tlb_invalidate_all();
flush_icache_all();
}
ret
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as TTBRx_EL1
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = ttbr_el1_read(0).start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut Aarch64PageTable| {
let backup = p4_table[RECURSIVE_INDEX].clone();
let old_frame = ttbr_el1_read(1);
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
// overwrite recursive mapping
p4_table[RECURSIVE_INDEX].set_frame(self.p4_frame.clone(), EF::default(), MairNormal::attr_value());
ttbr_el1_write(1, self.p4_frame.clone());
tlb_invalidate_all();
fn dealloc_frame(target: usize) {
dealloc_frame(target)
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[RECURSIVE_INDEX] = backup;
ttbr_el1_write(1, old_frame);
tlb_invalidate_all();
ret
})
}
}
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[KERNEL_PML4].clone();
assert!(!e0.is_unused());
self.edit(|_| {
table[KERNEL_PML4].set_frame(Frame::containing_address(e0.addr()), EF::default(), MairNormal::attr_value());
});
}
/// Activate as kernel page table (TTBR0).
/// Used in `arch::memory::remap_the_kernel()`.
pub unsafe fn activate_as_kernel(&self) {
@ -291,7 +249,7 @@ impl InactivePageTable0 {
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -2,7 +2,7 @@ use core::{slice, mem};
use riscv::{addr::*, register::sstatus};
use ucore_memory::PAGE_SIZE;
use log::*;
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR};
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR, Linear};
use crate::consts::{MEMORY_OFFSET, MEMORY_END, KERN_VA_BASE};
use riscv::register::satp;
@ -48,14 +48,9 @@ fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
// TODO: delete debug code
let mut ba = FRAME_ALLOCATOR.lock();
let range = to_range((end as usize) - KERN_VA_BASE + PAGE_SIZE, MEMORY_END);
info!("FrameAllocator insert {} .. {}", range.start, range.end);
ba.insert(range);
info!("FrameAllocator init end");
// DEBUG: trace code
trace!("init_frame_allocator: alloc={:x?}", ba.alloc());
/*
* @param:
@ -75,31 +70,15 @@ fn init_frame_allocator() {
}
/// Remap the kernel memory address with 4K page recorded in p1 page table
#[cfg(all(target_arch = "riscv32", not(feature = "no_mmu")))]
fn remap_the_kernel() {
let mut ms = MemorySet::new_bare();
#[cfg(feature = "no_bbl")]
ms.push(MemoryArea::new_identity(0x10000000, 0x10000008, MemoryAttr::default(), "serial"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), "stack"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);
}
#[cfg(all(target_arch = "riscv64", not(feature = "no_mmu")))]
#[cfg(not(feature = "no_mmu"))]
fn remap_the_kernel() {
let offset = -(super::consts::KERN_VA_BASE as isize);
let mut ms = MemorySet::new_bare();
#[cfg(feature = "no_bbl")]
ms.push(MemoryArea::new_identity(0x0000_0000_1000_0000, 0x0000_0000_1000_0008, MemoryAttr::default(), "serial"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), "stack"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
ms.push(stext as usize, etext as usize, Linear::new(offset, MemoryAttr::default().execute().readonly()), "text");
ms.push(sdata as usize, edata as usize, Linear::new(offset, MemoryAttr::default()), "data");
ms.push(srodata as usize, erodata as usize, Linear::new(offset, MemoryAttr::default().readonly()), "rodata");
ms.push(bootstack as usize, bootstacktop as usize, Linear::new(offset, MemoryAttr::default()), "stack");
ms.push(sbss as usize, ebss as usize, Linear::new(offset, MemoryAttr::default()), "bss");
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);

@ -22,8 +22,6 @@ pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
pub struct PageEntry(PageTableEntry, Page);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
/*
* @param:
* addr: the virtual addr to be matched
@ -33,7 +31,7 @@ impl PageTable for ActivePageTable {
* @retval:
* the matched PageEntry
*/
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging:Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
@ -66,7 +64,7 @@ impl PageTable for ActivePageTable {
* a mutable PageEntry reference of 'addr'
*/
#[cfg(target_arch = "riscv32")]
fn get_entry(&mut self, vaddr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let p2_table = unsafe { ROOT_PAGE_TABLE.as_mut().unwrap() };
let page = Page::of_addr(VirtAddr::new(vaddr));
if !p2_table[page.p2_index()].flags().contains(EF::VALID) {
@ -86,7 +84,7 @@ impl PageTable for ActivePageTable {
* a mutable PageEntry reference of 'addr'
*/
#[cfg(target_arch = "riscv64")]
fn get_entry(&mut self, vaddr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let vaddr = VirtAddr::new(vaddr);
let page = Page::of_addr(vaddr);
@ -99,45 +97,9 @@ impl PageTable for ActivePageTable {
self.1 = PageEntry(entry, page);
Some(&mut self.1)
}
/*
* @param:
* addr:the input (virutal) address
* @brief:
* get the addr's memory page slice
* @retval:
* a mutable reference slice of 'addr' 's page
*/
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe {
slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE)
}
}
/*
* @param:
* addr: virtual address
* @brief:
* get the address's content
* @retval:
* the content(u8) of 'addr'
*/
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
/*
* @param:
* addr: virtual address
* @brief:
* write the address's content
*/
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
#[cfg(target_arch = "riscv32")]
fn edit_entry_of<T>(page: &Page, f: impl FnOnce(&mut PageTableEntry) -> T) -> T {
@ -213,59 +175,15 @@ const ROOT_PAGE_TABLE: *mut RvPageTable =
impl ActivePageTable {
pub unsafe fn new() -> Self {
let rv = ActivePageTable(
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::zeroed());
rv
}
/*
* @param:
* frame: the target physical frame which will be temporarily mapped
* f: the function you would like to apply for once
* @brief:
* do something on the target physical frame?
*/
#[cfg(target_arch = "riscv64")]
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xffffdeadcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_usize(), frame.start_address().as_usize());
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
f(self, table);
// Unmap the page
self.unmap(0xffffdeadcafebabe);
}
#[cfg(target_arch = "riscv32")]
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_usize(), frame.start_address().as_usize());
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
::core::mem::zeroed()
)
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
// TODO: merge below two
#[cfg(target_arch = "riscv64")]
fn update(&mut self) {
edit_entry_of(&self.1, |entry| *entry = self.0);
sfence_vma(0, self.1.start_address());
}
#[cfg(target_arch = "riscv32")]
fn update(&mut self) {
edit_entry_of(&self.1, |entry| *entry = self.0);
sfence_vma(0, self.1.start_address());
@ -298,8 +216,8 @@ impl Entry for PageEntry {
fn set_user(&mut self, value: bool) { self.0.flags_mut().set(EF::USER, value); }
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
fn set_execute(&mut self, value: bool) { self.0.flags_mut().set(EF::EXECUTABLE, value); }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
fn mmio(&self) -> bool { false }
fn set_mmio(&mut self, value: bool) { }
}
#[derive(Debug)]
@ -310,29 +228,10 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
/*
* @brief:
* get a new pagetable (for a new process or thread)
* @retbal:
* the new pagetable
*/
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/*
* @brief:
* allocate a new frame and then self-mapping it and regard it as the inactivepagetale
* retval:
* the inactive page table
*/
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target)))
.expect("failed to allocate frame");
let mut at = active_table();
at.with_temporary_map(&frame, |_, table: &mut RvPageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
@ -340,74 +239,36 @@ impl InactivePageTable for InactivePageTable0 {
}
/*
* @param:
* f: a function to do something with the temporary modified activate page table
* @brief:
* temporarily make current `active_table`'s recursive entry point to
* `this` inactive table, so we can modify this inactive page table.
* map the kernel code memory address (p2 page table) in the new inactive page table according the current active page table
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&satp::read().frame(), |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
f(active_table);
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
let e2 = table[KERNEL_P2_INDEX + 1];
let e3 = table[KERNEL_P2_INDEX + 2];
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX] = e1;
table[KERNEL_P2_INDEX + 1] = e2;
table[KERNEL_P2_INDEX + 2] = e3;
});
}
/*
* @brief:
* active self as the current active page table
*/
unsafe fn activate(&self) {
let old_frame = satp::read().frame();
let new_frame = self.root_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(SATP_MODE, 0, new_frame);
sfence_vma_all();
}
}
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e1 = table[KERNEL_P4_INDEX];
assert!(!e1.is_unused());
/*
* @param:
* f: the function to run when temporarily activate self as current page table
* @brief:
* Temporarily activate self and run the process, and return the return value of f
* @retval:
* the return value of f
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = satp::read().frame();
let new_frame = self.root_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(SATP_MODE, 0, new_frame);
sfence_vma_all();
}
let target = f();
debug!("switch table {:x?} -> {:x?}", new_frame, old_frame);
if old_frame != new_frame {
satp::set(SATP_MODE, 0, old_frame);
sfence_vma_all();
}
target
self.edit(|_| {
table[KERNEL_P4_INDEX] = e1;
});
}
/*
* @brief:
* get the token of self, the token is self's pagetable frame's starting physical address
* @retval:
* self token
*/
#[cfg(target_arch = "riscv32")]
fn token(&self) -> usize {
self.root_frame.number() | (1 << 31) // as satp
@ -421,61 +282,49 @@ impl InactivePageTable for InactivePageTable0 {
satp
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
unsafe fn set_token(token: usize) {
asm!("csrw 0x180, $0" :: "r"(token) :: "volatile");
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
fn active_token() -> usize {
satp::read().bits()
}
#[cfg(target_arch = "riscv32")]
const SATP_MODE: satp::Mode = satp::Mode::Sv32;
#[cfg(target_arch = "riscv64")]
const SATP_MODE: satp::Mode = satp::Mode::Sv48;
fn flush_tlb() {
sfence_vma_all();
}
impl InactivePageTable0 {
/*
* @param:
* f: a function to do something with the temporary modified activate page table
* @brief:
* map the kernel code memory address (p2 page table) in the new inactive page table according the current active page table
* temporarily make current `active_table`'s recursive entry point to
* `this` inactive table, so we can modify this inactive page table.
*/
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
// for larger heap memory
let e2 = table[KERNEL_P2_INDEX + 1];
let e3 = table[KERNEL_P2_INDEX + 2];
assert!(!e1.is_unused());
assert!(!e2.is_unused());
assert!(!e3.is_unused());
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
// for larger heap memroy
table[KERNEL_P2_INDEX + 1].set(e2.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
}
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
sfence_vma_all();
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e1 = table[KERNEL_P4_INDEX];
assert!(!e1.is_unused());
// execute f in the new context
let ret = f(active_table);
self.edit(|_| {
table[KERNEL_P4_INDEX] = e1;
});
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
ret
})
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
Self::dealloc_frame(self.root_frame.start_address().as_usize());
dealloc_frame(self.root_frame.start_address().as_usize());
}
}

@ -1,4 +1,3 @@
use bit_allocator::{BitAlloc, BitAlloc64K};
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use spin::{Mutex, MutexGuard};
@ -44,9 +43,7 @@ pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
self.0.map_to(Page::of_addr(addr), Frame::of_addr(target), flags, &mut FrameAllocatorForX86)
.unwrap().flush();
@ -58,44 +55,21 @@ impl PageTable for ActivePageTable {
flush.flush();
}
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } { return None; }
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
}
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !0xfffusize) as *mut u8, PAGE_SIZE) }
}
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
impl PageTableExt for ActivePageTable {}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut x86PageTable)) {
// Create a temporary page
let page = Page::of_addr(0xcafebabe);
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_u64() as usize, frame.start_address().as_u64() as usize);
// Call f
let table = unsafe { &mut *page.start_address().as_mut_ptr() };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
impl Entry for PageEntry {
@ -142,8 +116,8 @@ impl Entry for PageEntry {
}
fn execute(&self) -> bool { !self.0.flags().contains(EF::NO_EXECUTE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::NO_EXECUTE, !value); }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
fn mmio(&self) -> bool { false }
fn set_mmio(&mut self, value: bool) { }
}
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
@ -166,16 +140,10 @@ pub struct InactivePageTable0 {
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(target))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut x86PageTable| {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
@ -183,78 +151,58 @@ impl InactivePageTable for InactivePageTable0 {
InactivePageTable0 { p4_frame: frame }
}
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&Cr3::read().0, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
}
unsafe fn activate(&self) {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = Cr3::read().0;
let new_frame = self.p4_frame.clone();
debug!("switch table {:?} -> {:?}", old_frame, new_frame);
if old_frame != new_frame {
Cr3::write(new_frame, Cr3Flags::empty());
}
let ret = f();
debug!("switch table {:?} -> {:?}", new_frame, old_frame);
if old_frame != new_frame {
Cr3::write(old_frame, Cr3Flags::empty());
}
ret
}
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
unsafe fn set_token(token: usize) {
Cr3::write(Frame::containing_address(PhysAddr::new(token as u64)), Cr3Flags::empty());
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
fn active_token() -> usize {
Cr3::read().0.start_address().as_u64() as usize
}
fn flush_tlb() {
tlb::flush_all();
}
impl InactivePageTable0 {
fn map_kernel(&mut self) {
let mut table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = Cr3::read().0.start_address().as_u64() as usize;
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
ret
})
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
}
}

@ -77,13 +77,12 @@ impl Stdin {
pub fn pop(&self) -> char {
// QEMU v3.0 don't support M-mode external interrupt (bug?)
// So we have to use polling.
// TODO: serial interrupt on aarch64
#[cfg(any(feature = "m_mode", target_arch = "aarch64"))]
#[cfg(feature = "m_mode")]
loop {
let c = crate::arch::io::getchar();
if c != '\0' { return c; }
}
#[cfg(not(any(feature = "m_mode", target_arch = "aarch64")))]
#[cfg(not(feature = "m_mode"))]
loop {
let ret = self.buf.lock().pop_front();
match ret {

@ -1,10 +1,10 @@
pub use crate::arch::paging::*;
use bit_allocator::{BitAlloc, BitAlloc4K, BitAlloc64K, BitAlloc1M};
use bit_allocator::BitAlloc;
use crate::consts::MEMORY_OFFSET;
use super::HEAP_ALLOCATOR;
use ucore_memory::{*, paging::PageTable};
use ucore_memory::cow::CowExt;
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, InactivePageTable};
pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, handler::*};
use ucore_memory::swap::*;
use crate::process::{process};
use crate::sync::{SpinNoIrqLock, SpinNoIrq, MutexGuard};
@ -21,19 +21,15 @@ pub type MemorySet = ucore_memory::no_mmu::MemorySet<NoMMUSupportImpl>;
// x86_64 support up to 256M memory
#[cfg(target_arch = "x86_64")]
pub type FrameAlloc = BitAlloc64K;
pub type FrameAlloc = bit_allocator::BitAlloc64K;
// RISCV32 has 8M memory
#[cfg(target_arch = "riscv32")]
pub type FrameAlloc = BitAlloc4K;
// RISCV64 has 8M memory.
#[cfg(target_arch = "riscv64")]
pub type FrameAlloc = BitAlloc4K;
// RISCV has 8M memory
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
pub type FrameAlloc = bit_allocator::BitAlloc4K;
// Raspberry Pi 3 has 1G memory
#[cfg(target_arch = "aarch64")]
pub type FrameAlloc = BitAlloc1M;
pub type FrameAlloc = bit_allocator::BitAlloc1M;
lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> = SpinNoIrqLock::new(FrameAlloc::default());
@ -50,37 +46,30 @@ pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>, SpinNoIrq>
ACTIVE_TABLE.lock()
}
// Page table for swap in and out
lazy_static!{
static ref ACTIVE_TABLE_SWAP: SpinNoIrqLock<SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>> =
SpinNoIrqLock::new(unsafe{SwapExt::new(ActivePageTable::new(), fifo::FifoSwapManager::default(), mock_swapper::MockSwapper::default())});
}
pub fn active_table_swap() -> MutexGuard<'static, SwapExt<ActivePageTable, fifo::FifoSwapManager, mock_swapper::MockSwapper>, SpinNoIrq>{
ACTIVE_TABLE_SWAP.lock()
}
#[derive(Debug, Clone, Copy)]
pub struct GlobalFrameAlloc;
/*
* @brief:
* allocate a free physical frame, if no free frame, then swap out one page and reture mapped frame as the free one
* @retval:
* the physical address for the allocated frame
*/
pub fn alloc_frame() -> Option<usize> {
impl FrameAllocator for GlobalFrameAlloc {
fn alloc(&self) -> Option<usize> {
// get the real address of the alloc frame
let mut ret = FRAME_ALLOCATOR.lock();
let ret = ret.alloc();
let ret = ret.map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
let ret = FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE + MEMORY_OFFSET);
trace!("Allocate frame: {:x?}", ret);
ret
//do we need : unsafe { ACTIVE_TABLE_SWAP.force_unlock(); } ???
// Some(ret.unwrap_or_else(|| active_table_swap().swap_out_any::<InactivePageTable0>().ok().expect("fail to swap out page")))
// TODO: try to swap out when alloc failed
}
pub fn dealloc_frame(target: usize) {
fn dealloc(&self, target: usize) {
trace!("Deallocate frame: {:x}", target);
FRAME_ALLOCATOR.lock().dealloc((target - MEMORY_OFFSET) / PAGE_SIZE);
}
}
pub fn alloc_frame() -> Option<usize> {
GlobalFrameAlloc.alloc()
}
pub fn dealloc_frame(target: usize) {
GlobalFrameAlloc.dealloc(target);
}
pub struct KernelStack(usize);
const STACK_SIZE: usize = 0x8000;
@ -104,44 +93,12 @@ impl Drop for KernelStack {
}
/*
* @param:
* addr: the virtual address of the page fault
* @brief:
* handle page fault
* @retval:
* Return true to continue, false to halt
*/
/// Handle page fault at `addr`.
/// Return true to continue, false to halt.
#[cfg(not(feature = "no_mmu"))]
pub fn page_fault_handler(addr: usize) -> bool {
info!("start handling swap in/out page fault, badva={:x}", addr);
//unsafe { ACTIVE_TABLE_SWAP.force_unlock(); }
/*LAB3 EXERCISE 1: YOUR STUDENT NUMBER
* handle the frame deallocated
*/
info!("get pt from processor()");
if process().memory_set.find_area(addr).is_none(){
return false;
}
let pt = process().memory_set.get_page_table_mut();
info!("pt got");
if active_table_swap().page_fault_handler(pt as *mut InactivePageTable0, addr, true, || alloc_frame().expect("fail to alloc frame")){
return true;
}
//////////////////////////////////////////////////////////////////////////////
// Handle copy on write (not being used now)
/*
unsafe { ACTIVE_TABLE.force_unlock(); }
if active_table().page_fault_handler(addr, || alloc_frame().expect("fail to alloc frame")){
return true;
}
*/
false
process().memory_set.page_fault_handler(addr)
}
pub fn init_heap() {

@ -1,18 +1,16 @@
use crate::arch::interrupt::{TrapFrame, Context as ArchContext};
use crate::memory::{MemoryArea, MemoryAttr, MemorySet, KernelStack, active_table_swap, alloc_frame, InactivePageTable0};
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, Type, SegmentData}};
use core::fmt::{Debug, Error, Formatter};
use alloc::{boxed::Box, collections::BTreeMap, vec::Vec, sync::Arc, string::String};
use ucore_memory::{Page};
use ucore_memory::memory_set::*;
use ucore_process::Context;
use alloc::{boxed::Box, collections::BTreeMap, string::String, sync::Arc, vec::Vec};
use log::*;
use simple_filesystem::file::File;
use spin::Mutex;
use log::*;
use ucore_process::Context;
use xmas_elf::{ElfFile, header, program::{Flags, ProgramHeader, SegmentData, Type}};
use crate::arch::interrupt::{Context as ArchContext, TrapFrame};
use crate::memory::{ByFrame, Delay, FrameAllocator, GlobalFrameAlloc, KernelStack, MemoryArea, MemoryAttr, MemorySet};
// TODO: avoid pub
pub struct ContextImpl {
pub struct Process {
pub arch: ArchContext,
pub memory_set: MemorySet,
pub kstack: KernelStack,
@ -20,17 +18,17 @@ pub struct ContextImpl {
pub cwd: String,
}
impl Context for ContextImpl {
impl Context for Process {
unsafe fn switch_to(&mut self, target: &mut Context) {
use core::mem::transmute;
let (target, _): (&mut ContextImpl, *const ()) = transmute(target);
let (target, _): (&mut Process, *const ()) = transmute(target);
self.arch.switch(&mut target.arch);
}
}
impl ContextImpl {
impl Process {
pub unsafe fn new_init() -> Box<Context> {
Box::new(ContextImpl {
Box::new(Process {
arch: ArchContext::null(),
memory_set: MemorySet::new(),
kstack: KernelStack::new(),
@ -42,7 +40,7 @@ impl ContextImpl {
pub fn new_kernel(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
let memory_set = MemorySet::new();
let kstack = KernelStack::new();
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe { ArchContext::new_kernel_thread(entry, arg, kstack.top(), memory_set.token()) },
memory_set,
kstack,
@ -52,7 +50,7 @@ impl ContextImpl {
}
/// Make a new user thread from ELF data
pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<ContextImpl>
pub fn new_user<'a, Iter>(data: &[u8], args: Iter) -> Box<Process>
where Iter: Iterator<Item=&'a str>
{
// Parse elf
@ -82,7 +80,7 @@ impl ContextImpl {
true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
};
memory_set.push(MemoryArea::new(ustack_buttom, ustack_top, MemoryAttr::default().user(), "user_stack"));
memory_set.push(ustack_buttom, ustack_top, ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
ustack_top
};
#[cfg(feature = "no_mmu")]
@ -96,10 +94,7 @@ impl ContextImpl {
let kstack = KernelStack::new();
//set the user Memory pages in the memory set swappable
memory_set_map_swappable(&mut memory_set);
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe {
ArchContext::new_user_thread(
entry_addr, ustack_top, kstack.top(), is32, memory_set.token())
@ -131,10 +126,7 @@ impl ContextImpl {
info!("temporary copy data!");
let kstack = KernelStack::new();
memory_set_map_swappable(&mut memory_set);
info!("FORK() finsihed!");
Box::new(ContextImpl {
Box::new(Process {
arch: unsafe { ArchContext::new_fork(tf, kstack.top(), memory_set.token()) },
memory_set,
kstack,
@ -144,34 +136,6 @@ impl ContextImpl {
}
}
#[cfg(not(feature = "no_mmu"))]
#[cfg(not(target_arch = "aarch64"))]
impl Drop for ContextImpl {
fn drop(&mut self){
info!("come in to drop for ContextImpl");
//set the user Memory pages in the memory set unswappable
let Self {ref mut arch, ref mut memory_set, ref mut kstack, ..} = self;
let pt = {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe {
active_table_swap().remove_from_swappable(pt, addr, || alloc_frame().expect("alloc frame failed"));
}
}
}
debug!("Finishing setting pages unswappable");
}
}
impl Debug for ContextImpl {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:x?}", self.arch)
}
}
/// Push a slice at the stack. Return the new sp.
unsafe fn push_slice<T: Copy>(mut sp: usize, vs: &[T]) -> usize {
use core::{mem::{size_of, align_of}, slice};
@ -201,7 +165,7 @@ unsafe fn push_args_at_stack<'a, Iter>(args: Iter, stack_top: usize) -> usize
/// Generate a MemorySet according to the ELF file.
/// Also return the real entry point address.
fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> (MemorySet, usize) {
fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
debug!("come in to memory_set_from");
let mut ms = MemorySet::new();
let mut entry = elf.header.pt2.entry_point() as usize;
@ -240,7 +204,7 @@ fn memory_set_from<'a>(elf: &'a ElfFile<'a>) -> (MemorySet, usize) {
info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
#[cfg(not(feature = "no_mmu"))]
let target = {
ms.push(MemoryArea::new(virt_addr, virt_addr + mem_size, memory_attr_from(ph.flags()), ""));
ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), "");
unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
};
// Copy data
@ -262,30 +226,3 @@ fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
if elf_flags.is_execute() { flags = flags.execute(); }
flags
}
/*
* @param:
* memory_set: the target MemorySet to set swappable
* @brief:
* map the memory area in the memory_set swappalbe, specially for the user process
*/
#[cfg(not(any(feature = "no_mmu", target_arch = "aarch64")))]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
info!("COME INTO memory set map swappable!");
let pt = unsafe {
memory_set.get_page_table_mut() as *mut InactivePageTable0
};
for area in memory_set.iter(){
for page in Page::range_of(area.get_start_addr(), area.get_end_addr()) {
let addr = page.start_address();
unsafe { active_table_swap().set_swappable(pt, addr); }
}
}
info!("Finishing setting pages swappable");
}
#[cfg(any(feature = "no_mmu", target_arch = "aarch64"))]
pub fn memory_set_map_swappable(memory_set: &mut MemorySet) {
// FIXME: Page Fault on aarch64
// NOTE: This function may disappear after refactor memory crate
}

@ -1,5 +1,5 @@
use spin::Mutex;
pub use self::context::ContextImpl;
pub use self::context::Process;
pub use ucore_process::*;
use crate::consts::{MAX_CPU_NUM, MAX_PROCESS_NUM};
use crate::arch::cpu;
@ -17,16 +17,18 @@ pub fn init() {
unsafe {
for cpu_id in 0..MAX_CPU_NUM {
PROCESSORS[cpu_id].init(cpu_id, ContextImpl::new_init(), manager.clone());
PROCESSORS[cpu_id].init(cpu_id, Process::new_init(), manager.clone());
}
}
// Add idle threads
extern fn idle(_arg: usize) -> ! {
loop { cpu::halt(); }
}
// TODO: make #idle_thr equal to #cpu
for i in 0..1 {
manager.add(ContextImpl::new_kernel(idle, i), 0);
use core::str::FromStr;
let cores = usize::from_str(env!("SMP")).unwrap();
for i in 0..cores {
manager.add(Process::new_kernel(idle, i), 0);
}
crate::shell::run_user_shell();
@ -36,9 +38,11 @@ pub fn init() {
static PROCESSORS: [Processor; MAX_CPU_NUM] = [Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new(), Processor::new()];
/// Get current thread struct
pub fn process() -> &'static mut ContextImpl {
///
/// FIXME: It's obviously unsafe to get &mut !
pub fn process() -> &'static mut Process {
use core::mem::transmute;
let (process, _): (&mut ContextImpl, *const ()) = unsafe {
let (process, _): (&mut Process, *const ()) = unsafe {
transmute(processor().context())
};
process
@ -54,5 +58,5 @@ pub fn processor() -> &'static Processor {
#[no_mangle]
pub fn new_kernel_context(entry: extern fn(usize) -> !, arg: usize) -> Box<Context> {
ContextImpl::new_kernel(entry, arg)
Process::new_kernel(entry, arg)
}

@ -7,10 +7,12 @@ use crate::process::*;
pub fn run_user_shell() {
if let Ok(inode) = ROOT_INODE.lookup("sh") {
println!("Going to user mode shell.");
println!("Use 'ls' to list available programs.");
let data = inode.read_as_vec().unwrap();
processor().manager().add(ContextImpl::new_user(data.as_slice(), "sh".split(' ')), 0);
processor().manager().add(Process::new_user(data.as_slice(), "sh".split(' ')), 0);
} else {
processor().manager().add(ContextImpl::new_kernel(shell, 0), 0);
processor().manager().add(Process::new_kernel(shell, 0), 0);
}
}
@ -27,7 +29,7 @@ pub extern fn shell(_arg: usize) -> ! {
let name = cmd.split(' ').next().unwrap();
if let Ok(file) = ROOT_INODE.lookup(name) {
let data = file.read_as_vec().unwrap();
let pid = processor().manager().add(ContextImpl::new_user(data.as_slice(), cmd.split(' ')), thread::current().id());
let pid = processor().manager().add(Process::new_user(data.as_slice(), cmd.split(' ')), thread::current().id());
unsafe { thread::JoinHandle::<()>::_of(pid) }.join().unwrap();
} else {
println!("Program not exist");

@ -208,7 +208,7 @@ fn sys_exec(name: *const u8, argc: usize, argv: *const *const u8, tf: &mut TrapF
// Make new Context
let iter = args.iter().map(|s| s.as_str());
let mut context = ContextImpl::new_user(buf.as_slice(), iter);
let mut context = Process::new_user(buf.as_slice(), iter);
// Activate new page table
unsafe { context.memory_set.activate(); }

Loading…
Cancel
Save