remove arch/riscv64 directory

master
WangRunji 6 years ago
parent f954c2fd6a
commit 02bd2b2317

@@ -6,11 +6,11 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
-BASE_ADDRESS = 0x80020000;
+BASE_ADDRESS = 0x40020000;
SECTIONS
{
-. = 0x80000000;
+. = 0x40000000;
.boot : {
KEEP(*(.text.boot))
}

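Both the old and the new layout keep the kernel entry 0x20000 (128 KiB) above the region start, with the .boot section placed at the region start itself. A minimal sketch of that invariant (constants copied from the script; reading the gap as reserved boot space is an assumption):

// Hedged sketch: the address relationship the linker script encodes.
const REGION_START: usize = 0x4000_0000; // `.` after the change; was 0x8000_0000
const BASE_ADDRESS: usize = 0x4002_0000; // kernel base; was 0x8002_0000
const _: () = assert!(BASE_ADDRESS - REGION_START == 0x20000); // 128 KiB below the kernel
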
@@ -1,57 +0,0 @@
--- atomic_backup.rs 2018-10-06 19:59:14.000000000 +0800
+++ atomic.rs 2018-10-26 14:34:31.000000000 +0800
@@ -125,6 +125,9 @@
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
+ #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+ v: UnsafeCell<u32>,
+ #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
v: UnsafeCell<u8>,
}
@@ -265,6 +268,44 @@
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic = "8")]
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+impl AtomicBool {
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(v: bool) -> AtomicBool {
+ AtomicBool { v: UnsafeCell::new(v as u32) }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> bool {
+ unsafe { atomic_load(self.v.get(), order) != 0 }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: bool, order: Ordering) {
+ unsafe { atomic_store(self.v.get(), val as u32, order); }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg(target_has_atomic = "cas")]
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ loop {
+ if let Ok(val) = unsafe { atomic_compare_exchange(self.v.get(), current as u32, new as u32, order, order) } {
+ return val != 0;
+ }
+ }
+ }
+}
+
+#[cfg(target_has_atomic = "8")]
+#[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
impl AtomicBool {
/// Creates a new `AtomicBool`.
///

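The patch exists because rv32/rv64 atomic instructions operate on 32-bit words, so AtomicBool's cell is widened from u8 to u32 and every bool round-trips through a u32. The same widening trick as a stand-alone sketch (not the patched libcore itself):

use core::sync::atomic::{AtomicU32, Ordering};

// Hedged sketch: a bool held in a 32-bit cell so each access compiles
// to a native word-sized atomic instruction.
pub struct WideBool(AtomicU32);

impl WideBool {
    pub const fn new(v: bool) -> Self { WideBool(AtomicU32::new(v as u32)) }
    pub fn load(&self, order: Ordering) -> bool { self.0.load(order) != 0 }
    pub fn store(&self, v: bool, order: Ordering) { self.0.store(v as u32, order) }
}
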
@@ -1,16 +0,0 @@
.section .text.boot
boot:
csrwi 0x304, 0 # mie
csrwi 0x344, 0 # mip
csrwi 0x340, 0 # mscratch
csrwi 0x180, 0 # satp
li t0, -1
csrw 0x302, t0 # medeleg
csrw 0x303, t0 # mideleg
csrw 0x306, t0 # mcounteren
csrw 0x106, t0 # scounteren
li t0, 1 << 11 # MPP = S
csrw 0x300, t0 # mstatus
lui t0, 0x80020
csrw 0x341, t0 # mepc
mret

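boot.asm runs in M-mode before any firmware: it clears the M-mode state, delegates all exceptions and interrupts to S-mode, sets MPP = S, points mepc at the kernel entry, and mrets into it. The raw CSR numbers match the standard assignments named in the inline comments; collected in one place for reference (a sketch, numbers per the RISC-V privileged spec):

// Hedged reference: CSR numbers used in boot.asm.
const MSTATUS: u16 = 0x300; // MPP field set to S before mret
const MEDELEG: u16 = 0x302; // all exceptions delegated to S-mode
const MIDELEG: u16 = 0x303; // all interrupts delegated to S-mode
const MIE: u16 = 0x304;     // cleared: no M-mode interrupts yet
const SATP: u16 = 0x180;    // cleared: paging off
const MEPC: u16 = 0x341;    // set to 0x80020000, the kernel entry
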
@@ -1,19 +0,0 @@
.section .text.entry
.globl _start
_start:
add t0, a0, 1
slli t0, t0, 16
lui sp, %hi(bootstack)
addi sp, sp, %lo(bootstack)
add sp, sp, t0
call rust_main
.section .bss
.align 12 #PGSHIFT
.global bootstack
bootstack:
.space 4096 * 16 * 8
.global bootstacktop
bootstacktop:

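_start computes a per-hart stack pointer: t0 = (a0 + 1) << 16 gives hart a0 a 64 KiB slice, and sp lands at the top of that slice inside the 8-slice bootstack. The same arithmetic in Rust (a sketch; names follow the assembly above):

// Hedged sketch of the stack setup in _start.
const STACK_SLICE: usize = 1 << 16; // 4096 * 16 bytes per hart
const NUM_HARTS: usize = 8;         // bootstack reserves 8 slices

fn initial_sp(bootstack: usize, hartid: usize) -> usize {
    // sp = bootstack + (hartid + 1) << 16: the top of this hart's slice
    bootstack + (hartid + 1) * STACK_SLICE
}
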
@@ -1,127 +0,0 @@
# Constants / Macros defined in Rust code:
# xscratch
# xstatus
# xepc
# xcause
# xtval
# XRET
.macro SAVE_ALL
# If coming from userspace, preserve the user stack pointer and load
# the kernel stack pointer. If we came from the kernel, sscratch
# will contain 0, and we should continue on the current stack.
csrrw sp, (xscratch), sp
bnez sp, _save_context
_restore_kernel_sp:
csrr sp, (xscratch)
# sscratch = previous-sp, sp = kernel-sp
_save_context:
# provide room for trap frame
addi sp, sp, -36 * 4
# save x registers except x2 (sp)
sw x1, 1*4(sp)
sw x3, 3*4(sp)
# tp(x4) = hartid. DON'T change.
# sw x4, 4*4(sp)
sw x5, 5*4(sp)
sw x6, 6*4(sp)
sw x7, 7*4(sp)
sw x8, 8*4(sp)
sw x9, 9*4(sp)
sw x10, 10*4(sp)
sw x11, 11*4(sp)
sw x12, 12*4(sp)
sw x13, 13*4(sp)
sw x14, 14*4(sp)
sw x15, 15*4(sp)
sw x16, 16*4(sp)
sw x17, 17*4(sp)
sw x18, 18*4(sp)
sw x19, 19*4(sp)
sw x20, 20*4(sp)
sw x21, 21*4(sp)
sw x22, 22*4(sp)
sw x23, 23*4(sp)
sw x24, 24*4(sp)
sw x25, 25*4(sp)
sw x26, 26*4(sp)
sw x27, 27*4(sp)
sw x28, 28*4(sp)
sw x29, 29*4(sp)
sw x30, 30*4(sp)
sw x31, 31*4(sp)
# get sp, sstatus, sepc, stval, scause
# set sscratch = 0
csrrw s0, (xscratch), x0
csrr s1, (xstatus)
csrr s2, (xepc)
csrr s3, (xtval)
csrr s4, (xcause)
# store sp, sstatus, sepc, sbadvaddr, scause
sw s0, 2*4(sp)
sw s1, 32*4(sp)
sw s2, 33*4(sp)
sw s3, 34*4(sp)
sw s4, 35*4(sp)
.endm
.macro RESTORE_ALL
lw s1, 32*4(sp) # s1 = sstatus
lw s2, 33*4(sp) # s2 = sepc
andi s0, s1, 1 << 8
bnez s0, _restore_context # back to S-mode? (sstatus.SPP = 1)
_save_kernel_sp:
addi s0, sp, 36*4
csrw (xscratch), s0 # sscratch = kernel-sp
_restore_context:
# restore sstatus, sepc
csrw (xstatus), s1
csrw (xepc), s2
# restore x registers except x2 (sp)
lw x1, 1*4(sp)
lw x3, 3*4(sp)
# lw x4, 4*4(sp)
lw x5, 5*4(sp)
lw x6, 6*4(sp)
lw x7, 7*4(sp)
lw x8, 8*4(sp)
lw x9, 9*4(sp)
lw x10, 10*4(sp)
lw x11, 11*4(sp)
lw x12, 12*4(sp)
lw x13, 13*4(sp)
lw x14, 14*4(sp)
lw x15, 15*4(sp)
lw x16, 16*4(sp)
lw x17, 17*4(sp)
lw x18, 18*4(sp)
lw x19, 19*4(sp)
lw x20, 20*4(sp)
lw x21, 21*4(sp)
lw x22, 22*4(sp)
lw x23, 23*4(sp)
lw x24, 24*4(sp)
lw x25, 25*4(sp)
lw x26, 26*4(sp)
lw x27, 27*4(sp)
lw x28, 28*4(sp)
lw x29, 29*4(sp)
lw x30, 30*4(sp)
lw x31, 31*4(sp)
# restore sp last
lw x2, 2*4(sp)
.endm
.section .text
.globl __alltraps
__alltraps:
SAVE_ALL
mv a0, sp
jal rust_trap
.globl __trapret
__trapret:
RESTORE_ALL
# return from supervisor call
XRET

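SAVE_ALL reserves a 36-word frame whose layout must line up exactly with the Rust TrapFrame defined later (x[0..32], then status, epc, tval, cause). The word offsets spelled out, assuming 4-byte words as in this rv32 file:

// Hedged sketch: the frame layout SAVE_ALL writes.
// Word i holds x_i for i in 1..=31 (word 2 is the pre-trap sp taken from
// sscratch; word 4 / tp is deliberately skipped); words 32..=35 hold
// status, epc, tval, cause.
const WORD: usize = 4;                // rv32
const SP_SLOT: usize = 2 * WORD;      // saved user/kernel sp
const STATUS_SLOT: usize = 32 * WORD; // sstatus (or mstatus)
const FRAME_SIZE: usize = 36 * WORD;  // matches `addi sp, sp, -36 * 4`
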
@@ -1,38 +0,0 @@
// http://llvm.org/docs/Atomics.html#libcalls-atomic
int __atomic_load_4(int *src) {
int res = 0;
__asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
return res;
}
void __atomic_store_4(int *dst, int val) {
__asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
}
char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
int val;
// val = *dst
__asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
if (val == *expected) {
int result;
// Try: *dst = desired. If success, result = 0, otherwise result != 0.
__asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(dst) : "memory");
return result == 0;
}
// *expected should always equal the previous value of *dst
*expected = val;
return 0;
}
int __atomic_fetch_add_4(int* ptr, int val) {
int res;
__asm__ __volatile__("amoadd.w.rl %0, %1, (%2)" : "=r"(res) : "r"(val), "r"(ptr) : "memory");
return res;
}
int __atomic_fetch_sub_4(int* ptr, int val) {
int res;
__asm__ __volatile__("amoadd.w.rl %0, %1, (%2)" : "=r"(res) : "r"(-val), "r"(ptr) : "memory");
return res;
}

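Note that sc.w may fail spuriously, so the single lr/sc pair in __atomic_compare_exchange_4 can report failure even when the value matched; that is why the Rust compare_and_swap shown earlier retries in a loop. The contract these shims implement, restated with core atomics (a sketch, not the shim):

use core::sync::atomic::{AtomicI32, Ordering};

// Hedged sketch of the __atomic_compare_exchange_4 contract: on failure
// the observed value is written back through `expected`.
fn compare_exchange_4(a: &AtomicI32, expected: &mut i32, desired: i32) -> bool {
    // `weak` mirrors the lr/sc pair, which may fail spuriously
    match a.compare_exchange_weak(*expected, desired, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(_) => true,
        Err(seen) => { *expected = seen; false }
    }
}
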
@@ -1,25 +0,0 @@
//! Workaround for missing compiler-builtin symbols
//!
//! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic)
/// Copy from:
/// https://github.com/rust-lang-nursery/compiler-builtins/blob/master/src/riscv32.rs
#[no_mangle]
pub extern fn __mulsi3(mut a: u32, mut b: u32) -> u32 {
let mut r: u32 = 0;
while a > 0 {
if a & 1 > 0 {
r += b;
}
a >>= 1;
b <<= 1;
}
r
}
#[no_mangle]
pub extern fn abort() {
loop {}
}

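__mulsi3 is the classic shift-and-add multiply for cores without the M extension: each set bit of `a` adds a correspondingly shifted copy of `b`. A quick usage check:

// Usage sketch for the software multiply above.
fn main() {
    assert_eq!(__mulsi3(6, 7), 42); // 6 = 0b110: (7 << 1) + (7 << 2)
    assert_eq!(__mulsi3(1, 9), 9);
    assert_eq!(__mulsi3(0, 9), 0);  // no set bits, nothing added
}
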
@@ -1,14 +0,0 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
const P2_SIZE: usize = 1 << 22;
const P2_MASK: usize = 0x3ff << 22;
pub const RECURSIVE_INDEX: usize = 0x3fe;
pub const KERNEL_OFFSET: usize = 0;
pub const KERNEL_P2_INDEX: usize = 0x8000_0000 >> 22;
pub const KERNEL_HEAP_SIZE: usize = 0x00a0_0000;
pub const MEMORY_OFFSET: usize = 0x8000_0000;
//pub const MEMORY_END: usize = 0x8080_0000; //for thinpad not enough now
pub const MEMORY_END: usize = 0x8100_0000;
pub const USER_STACK_OFFSET: usize = 0x70000000;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;

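KERNEL_P2_INDEX is the Sv32 top-level (P2) index of the kernel base: bits 31..22 of a virtual address select the P2 slot, so 0x8000_0000 >> 22 = 0x200. A small sketch against the constants above:

// Hedged sketch: Sv32 P2 index extraction.
const P2_SHIFT: usize = 22;

fn p2_index(vaddr: usize) -> usize {
    (vaddr >> P2_SHIFT) & 0x3ff
}

fn main() {
    assert_eq!(p2_index(0x8000_0000), 0x200); // KERNEL_P2_INDEX
    assert_eq!(p2_index(0x1000_0000), 0x40);  // serial MMIO slot used in paging.rs
}
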
@@ -1,277 +0,0 @@
#[cfg(feature = "m_mode")]
use riscv::register::{
mstatus as xstatus,
mstatus::Mstatus as Xstatus,
mcause::Mcause,
};
#[cfg(not(feature = "m_mode"))]
use riscv::register::{
sstatus as xstatus,
sstatus::Sstatus as Xstatus,
mcause::Mcause,
};
#[derive(Clone)]
#[repr(C)]
pub struct TrapFrame {
pub x: [usize; 32], // general registers
pub sstatus: Xstatus, // Supervisor Status Register
pub sepc: usize, // Supervisor exception program counter: the PC at the trap (reused to hold the entry address when building a new thread)
pub stval: usize, // Supervisor trap value
pub scause: Mcause, // records the cause of the exception/interrupt/trap
}
/// Generate the trap frame for building a new thread in the kernel
impl TrapFrame {
/*
* @param:
* entry: program entry for the thread
* arg: a0
* sp: stack top
* @brief:
* generate a trap frame for building a new kernel thread
* @retval:
* the trapframe for new kernel thread
*/
fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, sp: usize) -> Self {
use core::mem::zeroed;
let mut tf: Self = unsafe { zeroed() };
tf.x[10] = arg; // a0
tf.x[2] = sp;
tf.sepc = entry as usize;
tf.sstatus = xstatus::read();
tf.sstatus.set_xpie(true);
tf.sstatus.set_xie(false);
#[cfg(feature = "m_mode")]
tf.sstatus.set_mpp(xstatus::MPP::Machine);
#[cfg(not(feature = "m_mode"))]
tf.sstatus.set_spp(xstatus::SPP::Supervisor);
tf
}
/*
* @param:
* entry_addr: program entry for the thread
* sp: stack top
* @brief:
* generate a trap frame for building a new user thread
* @retval:
* the trapframe for new user thread
*/
fn new_user_thread(entry_addr: usize, sp: usize) -> Self {
use core::mem::zeroed;
let mut tf: Self = unsafe { zeroed() };
tf.x[2] = sp;
tf.sepc = entry_addr;
tf.sstatus = xstatus::read();
tf.sstatus.set_xpie(true);
tf.sstatus.set_xie(false);
#[cfg(feature = "m_mode")]
tf.sstatus.set_mpp(xstatus::MPP::User);
#[cfg(not(feature = "m_mode"))]
tf.sstatus.set_spp(xstatus::SPP::User);
tf
}
}
use core::fmt::{Debug, Formatter, Error};
impl Debug for TrapFrame {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
struct Regs<'a>(&'a [usize; 32]);
impl<'a> Debug for Regs<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
const REG_NAME: [&str; 32] = [
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
"t3", "t4", "t5", "t6"];
f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish()
}
}
f.debug_struct("TrapFrame")
.field("regs", &Regs(&self.x))
.field("sstatus", &self.sstatus)
.field("sepc", &self.sepc)
.field("stval", &self.stval)
.field("scause", &self.scause)
.finish()
}
}
/// kernel stack contents for a new thread
#[derive(Debug)]
#[repr(C)]
pub struct InitStack {
context: ContextData,
tf: TrapFrame,
}
impl InitStack {
/*
* @param:
* stack_top: the pointer to the kernel stack top
* @brief:
* save the InitStack at the kernel stack top
* @retval:
* a Context with ptr in it
*/
unsafe fn push_at(self, stack_top: usize) -> Context {
let ptr = (stack_top as *mut Self).offset(-1); //real kernel stack top
*ptr = self;
Context(ptr as usize)
}
}
extern {
fn __trapret();
}
#[derive(Debug, Default)]
#[repr(C)]
struct ContextData {
ra: usize,
satp: usize,
s: [usize; 12],
}
impl ContextData {
fn new(satp: usize) -> Self {
// satp plays the role of x86's cr3: it holds the root page-table frame (plus ASID)
ContextData { ra: __trapret as usize, satp, ..ContextData::default() }
}
}
/// A struct containing only one usize element
#[derive(Debug)]
pub struct Context(usize);
impl Context {
/// Switch to another kernel thread.
///
/// Defined in `trap.asm`.
///
/// Push all callee-saved registers at the current kernel stack.
/// Store current sp, switch to target.
/// Pop all callee-saved registers, then return to the target.
#[naked]
#[inline(never)]
pub unsafe extern fn switch(&mut self, target: &mut Self) {
asm!(
"
// save from's registers
addi sp, sp, -4*14
sw sp, 0(a0)
sw ra, 0*4(sp)
sw s0, 2*4(sp)
sw s1, 3*4(sp)
sw s2, 4*4(sp)
sw s3, 5*4(sp)
sw s4, 6*4(sp)
sw s5, 7*4(sp)
sw s6, 8*4(sp)
sw s7, 9*4(sp)
sw s8, 10*4(sp)
sw s9, 11*4(sp)
sw s10, 12*4(sp)
sw s11, 13*4(sp)
csrrs s11, 0x180, x0 // satp
sw s11, 1*4(sp)
// restore to's registers
lw sp, 0(a1)
lw s11, 1*4(sp)
csrrw x0, 0x180, s11 // satp
lw ra, 0*4(sp)
lw s0, 2*4(sp)
lw s1, 3*4(sp)
lw s2, 4*4(sp)
lw s3, 5*4(sp)
lw s4, 6*4(sp)
lw s5, 7*4(sp)
lw s6, 8*4(sp)
lw s7, 9*4(sp)
lw s8, 10*4(sp)
lw s9, 11*4(sp)
lw s10, 12*4(sp)
lw s11, 13*4(sp)
addi sp, sp, 4*14
sw zero, 0(a1)
ret"
: : : : "volatile" )
}
/*
* @brief:
* generate a null Context
* @retval:
* a null Context
*/
pub unsafe fn null() -> Self {
Context(0)
}
/*
* @param:
* entry: program entry for the thread
* arg: a0
* kstack_top: kernel stack top
* cr3: page-table token (named after x86's cr3); the physical address of the root page directory
* @brief:
* generate the kernel stack contents for the new kernel thread and save its address at kernel stack top - 1
* @retval:
* a Context struct with the pointer to the kernel stack top - 1 as its only element
*/
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, cr3: usize) -> Self {
InitStack {
context: ContextData::new(cr3),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top)
}
/*
* @param:
* entry_addr: program entry for the thread
* ustack_top: user stack top
* kstack_top: kernel stack top
* is32: whether the cpu is 32 bit or not
* cr3: page-table token (named after x86's cr3); the physical address of the root page directory
* @brief:
* generate the kernel stack contents for the new user thread and save its address at kernel stack top - 1
* @retval:
* a Context struct with the pointer to the kernel stack top - 1 as its only element
*/
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize) -> Self {
InitStack {
context: ContextData::new(cr3),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
}.push_at(kstack_top)
}
/*
* @param:
* tf: the trap frame of the forked process (thread)
* kstack_top: kernel stack top
* cr3: page-table token (named after x86's cr3); the physical address of the root page directory
* @brief:
* fork: generate a new process (thread) Context from the given TrapFrame and save its address at kernel stack top - 1
* @retval:
* a Context struct with the pointer to the kernel stack top - 1 as its only element
*/
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, cr3: usize) -> Self {
InitStack {
context: ContextData::new(cr3),
tf: {
let mut tf = tf.clone();
// fork's return value: 0 in the child process
tf.x[10] = 0; // a0
tf
},
}.push_at(kstack_top)
}
/// Called on a new user context.
/// Used by sys_exec to get the initial TrapFrame.
pub unsafe fn get_init_tf(&self) -> TrapFrame {
(*(self.0 as *const InitStack)).tf.clone()
}
}

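How a fresh thread is first entered: push_at leaves [ContextData | TrapFrame] at the stack top with ContextData.ra = __trapret, so the first switch() into that Context `ret`s straight into __trapret, which pops the TrapFrame and XRETs to the entry point. A hedged usage sketch (kstack_top and satp values are placeholders):

// Hedged sketch: bringing up a kernel thread through switch().
extern fn worker(_arg: usize) -> ! {
    loop {}
}

unsafe fn spawn_and_enter(current: &mut Context, kstack_top: usize, satp: usize) {
    let mut new = Context::new_kernel_thread(worker, 0, kstack_top, satp);
    // switch() saves the callee-saved registers of `current`, then pops
    // the ContextData pushed by push_at; its ra points at __trapret.
    current.switch(&mut new);
}
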
@@ -1,36 +0,0 @@
use crate::consts::MAX_CPU_NUM;
use core::ptr::{read_volatile, write_volatile};
use crate::memory::*;
static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
pub unsafe fn set_cpu_id(cpu_id: usize) {
asm!("mv tp, $0" : : "r"(cpu_id));
}
pub fn id() -> usize {
let cpu_id;
unsafe { asm!("mv $0, tp" : "=r"(cpu_id)); }
cpu_id
}
pub fn send_ipi(cpu_id: usize) {
bbl::sbi::send_ipi(1 << cpu_id);
}
pub unsafe fn has_started(cpu_id: usize) -> bool {
read_volatile(&STARTED[cpu_id])
}
pub unsafe fn start_others(hart_mask: usize) {
for cpu_id in 0..MAX_CPU_NUM {
if (hart_mask >> cpu_id) & 1 != 0 {
write_volatile(&mut STARTED[cpu_id], true);
}
}
}
pub fn halt() {
use riscv::asm::wfi;
unsafe { wfi() }
}

@@ -1,171 +0,0 @@
#[cfg(feature = "m_mode")]
use riscv::register::{
mstatus as xstatus,
mscratch as xscratch,
mtvec as xtvec,
};
#[cfg(not(feature = "m_mode"))]
use riscv::register::{
sstatus as xstatus,
sscratch as xscratch,
stvec as xtvec,
};
use riscv::register::{mcause, mepc, sie};
pub use self::context::*;
use crate::memory::{MemorySet, InactivePageTable0};
use log::*;
#[path = "context.rs"]
mod context;
/*
* @brief:
* initialize the interrupt status
*/
pub fn init() {
extern {
fn __alltraps();
}
unsafe {
// Set sscratch register to 0, indicating to exception vector that we are
// presently executing in the kernel
xscratch::write(0);
// Set the exception vector address
xtvec::write(__alltraps as usize, xtvec::TrapMode::Direct);
// Enable IPI
sie::set_ssoft();
// Enable serial interrupt
sie::set_sext();
// NOTE: In M-mode: mie.MSIE is set by BBL.
// mie.MEIE can not be set in QEMU v3.0
// (seems like a bug)
}
info!("interrupt: init end");
}
/*
* @brief:
* enable interrupt
*/
#[inline(always)]
pub unsafe fn enable() {
xstatus::set_xie();
}
/*
* @brief:
* store and disable interrupt
* @retval:
* a usize value storing the original interrupt-enable (xIE) bit
*/
#[inline(always)]
pub unsafe fn disable_and_store() -> usize {
let e = xstatus::read().xie() as usize;
xstatus::clear_xie();
e
}
/*
* @param:
* flags: input flag
* @brief:
* enable interrupt if flags != 0
*/
#[inline(always)]
pub unsafe fn restore(flags: usize) {
if flags != 0 {
xstatus::set_xie();
}
}
/*
* @param:
* tf: the trap frame of the interrupt/exception/trap to be processed
* @brief:
* process the Interrupt/Exception/Trap
*/
#[no_mangle]
pub extern fn rust_trap(tf: &mut TrapFrame) {
use self::mcause::{Trap, Interrupt as I, Exception as E};
trace!("Interrupt @ CPU{}: {:?} ", super::cpu::id(), tf.scause.cause());
match tf.scause.cause() {
// M-mode only
Trap::Interrupt(I::MachineExternal) => serial(),
Trap::Interrupt(I::MachineSoft) => ipi(),
Trap::Interrupt(I::MachineTimer) => timer(),
Trap::Exception(E::MachineEnvCall) => sbi(tf),
Trap::Interrupt(I::SupervisorExternal) => serial(),
Trap::Interrupt(I::SupervisorSoft) => ipi(),
Trap::Interrupt(I::SupervisorTimer) => timer(),
Trap::Exception(E::IllegalInstruction) => illegal_inst(tf),
Trap::Exception(E::UserEnvCall) => syscall(tf),
Trap::Exception(E::LoadPageFault) => page_fault(tf),
Trap::Exception(E::StorePageFault) => page_fault(tf),
Trap::Exception(E::InstructionPageFault) => page_fault(tf),
_ => crate::trap::error(tf),
}
trace!("Interrupt end");
}
/// Call BBL SBI functions for M-mode kernel
fn sbi(tf: &mut TrapFrame) {
(super::BBL.mcall_trap)(tf.x.as_ptr(), tf.scause.bits(), tf.sepc);
tf.sepc += 4;
}
fn serial() {
crate::trap::serial(super::io::getchar());
}
fn ipi() {
debug!("IPI");
bbl::sbi::clear_ipi();
}
/*
* @brief:
* process timer interrupt
*/
fn timer() {
super::timer::set_next();
crate::trap::timer();
}
/*
* @param:
* tf: the trap frame for the syscall
* @brief:
* process syscall
*/
fn syscall(tf: &mut TrapFrame) {
tf.sepc += 4; // Must happen before the dispatch: fork clones this TrapFrame, and the child must resume after the ecall.
let ret = crate::syscall::syscall(tf.x[10], [tf.x[11], tf.x[12], tf.x[13], tf.x[14], tf.x[15], tf.x[16]], tf);
tf.x[10] = ret as usize;
}
/*
* @param:
* tf: the trap frame for the illegal instruction exception
* @brief:
* process IllegalInstruction exception
*/
fn illegal_inst(tf: &mut TrapFrame) {
(super::BBL.illegal_insn_trap)(tf.x.as_ptr(), tf.scause.bits(), tf.sepc);
tf.sepc = mepc::read();
}
/*
* @param:
* tf: the trap frame for the page fault exception
* @brief:
* process page fault exception
*/
fn page_fault(tf: &mut TrapFrame) {
let addr = tf.stval;
trace!("\nEXCEPTION: Page Fault @ {:#x}", addr);
if !crate::memory::page_fault_handler(addr) {
crate::trap::error(tf);
}
}

@@ -1,60 +0,0 @@
use core::fmt::{Write, Result, Arguments};
use core::ptr::{read_volatile, write_volatile};
use bbl::sbi;
struct SerialPort;
impl Write for SerialPort {
fn write_str(&mut self, s: &str) -> Result {
for c in s.bytes() {
if c == 127 {
// DEL: erase the previous character (backspace, space, backspace)
putchar(8);
putchar(b' ');
putchar(8);
} else {
putchar(c);
}
}
Ok(())
}
}
fn putchar(c: u8) {
if cfg!(feature = "no_bbl") {
unsafe {
while read_volatile(STATUS) & CAN_WRITE == 0 {}
write_volatile(DATA, c as u8);
}
} else if cfg!(feature = "m_mode") {
(super::BBL.mcall_console_putchar)(c);
} else {
sbi::console_putchar(c as usize);
}
}
pub fn getchar() -> char {
let c = if cfg!(feature = "no_bbl") {
unsafe {
// while read_volatile(STATUS) & CAN_READ == 0 {}
read_volatile(DATA)
}
} else if cfg!(feature = "m_mode") {
(super::BBL.mcall_console_getchar)() as u8
} else {
sbi::console_getchar() as u8
};
match c {
255 => '\0', // null
c => c as char,
}
}
pub fn putfmt(fmt: Arguments) {
SerialPort.write_fmt(fmt).unwrap();
}
const DATA: *mut u8 = 0x10000000 as *mut u8;
const STATUS: *const u8 = 0x10000005 as *const u8;
const CAN_READ: u8 = 1 << 0;
const CAN_WRITE: u8 = 1 << 5;

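The no_bbl constants describe a 16550-style UART at 0x10000000: DATA is the receive/transmit register and STATUS the line-status register at offset 5, with CAN_READ as data-ready (bit 0) and CAN_WRITE as transmitter-empty (bit 5). The blocking read that the commented-out wait loop in getchar would give, as a sketch under that assumption:

use core::ptr::read_volatile;

// Hedged sketch: polled UART read using the constants above.
unsafe fn getchar_blocking() -> u8 {
    while read_volatile(STATUS) & CAN_READ == 0 {} // wait for data-ready
    read_volatile(DATA)
}
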
@@ -1,110 +0,0 @@
use core::{slice, mem};
use riscv::{addr::*, register::sstatus};
use ucore_memory::PAGE_SIZE;
use log::*;
use crate::memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, MEMORY_ALLOCATOR};
use crate::consts::{MEMORY_OFFSET, MEMORY_END};
#[cfg(feature = "no_mmu")]
pub fn init() {
init_heap();
let heap_bottom = end as usize;
let heap_size = MEMORY_END - heap_bottom;
unsafe { MEMORY_ALLOCATOR.lock().init(heap_bottom, heap_size); }
info!("available memory: [{:#x}, {:#x})", heap_bottom, MEMORY_END);
}
/*
* @brief:
* Initialize the memory management module: allow user memory access, set up the page table, and init the heap and frame allocator
*/
#[cfg(not(feature = "no_mmu"))]
pub fn init() {
#[repr(align(4096))] // align the PageData struct to 4096 bytes
struct PageData([u8; PAGE_SIZE]);
static PAGE_TABLE_ROOT: PageData = PageData([0; PAGE_SIZE]);
unsafe { sstatus::set_sum(); } // Allow user memory access
let frame = Frame::of_addr(PhysAddr::new(&PAGE_TABLE_ROOT as *const _ as u32));
super::paging::setup_page_table(frame); // set up page table
// initialize heap and Frame allocator
init_frame_allocator();
init_heap();
// remap the kernel use 4K page
remap_the_kernel();
}
pub fn init_other() {
unsafe {
sstatus::set_sum(); // Allow user memory access
asm!("csrw 0x180, $0; sfence.vma" :: "r"(SATP) :: "volatile");
}
}
/*
* @brief:
* Initialize the frame allocator; it uses a BitAlloc backed by a segment tree.
*/
fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
let mut ba = FRAME_ALLOCATOR.lock();
ba.insert(to_range(end as usize + PAGE_SIZE, MEMORY_END));
info!("FrameAllocator init end");
/*
* @param:
* start: start address
* end: end address
* @brief:
* transform the memory address to the page number
* @retval:
* the page number range from start address to end address
*/
fn to_range(start: usize, end: usize) -> Range<usize> {
let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
page_start..page_end
}
}
/*
* @brief:
* remap the kernel memory with 4 KiB pages recorded in the P1 page tables
*/
#[cfg(not(feature = "no_mmu"))]
fn remap_the_kernel() {
let mut ms = MemorySet::new_bare();
#[cfg(feature = "no_bbl")]
ms.push(MemoryArea::new_identity(0x10000000, 0x10000008, MemoryAttr::default(), "serial"));
ms.push(MemoryArea::new_identity(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), "text"));
ms.push(MemoryArea::new_identity(sdata as usize, edata as usize, MemoryAttr::default(), "data"));
ms.push(MemoryArea::new_identity(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), "rodata"));
ms.push(MemoryArea::new_identity(sbss as usize, ebss as usize, MemoryAttr::default(), "bss"));
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);
info!("kernel remap end");
}
// First core stores its SATP here.
// Other cores load it later.
static mut SATP: usize = 0;
// Symbols provided by linker script
extern {
fn stext();
fn etext();
fn sdata();
fn edata();
fn srodata();
fn erodata();
fn sbss();
fn ebss();
fn start();
fn end();
fn bootstack();
fn bootstacktop();
}

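to_range maps a byte range onto frame numbers relative to MEMORY_OFFSET; the `- 1 ... + 1` on the end rounds up so a partial last page is still covered. A worked example (MEMORY_OFFSET = 0x8000_0000 and PAGE_SIZE = 0x1000 assumed from consts.rs):

use core::ops::Range;

// Hedged worked example of to_range's rounding.
fn to_range(start: usize, end: usize) -> Range<usize> {
    let page_start = (start - 0x8000_0000) / 0x1000;
    let page_end = (end - 0x8000_0000 - 1) / 0x1000 + 1;
    page_start..page_end
}

fn main() {
    // One byte spilling into a third page still claims that page.
    assert_eq!(to_range(0x8040_1000, 0x8040_3001), 0x401..0x404);
}
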
@@ -1,81 +0,0 @@
pub mod io;
pub mod interrupt;
pub mod timer;
pub mod paging;
pub mod memory;
pub mod compiler_rt;
pub mod consts;
pub mod cpu;
#[no_mangle]
pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize, functions: usize) -> ! {
unsafe { cpu::set_cpu_id(hartid); }
unsafe { BBL_FUNCTIONS_PTR = functions as *const _; }
println!("Hello RISCV! in hart {}, dtb @ {:#x}, functions @ {:#x}", hartid, dtb, functions);
if hartid != 0 {
while unsafe { !cpu::has_started(hartid) } { }
others_main();
unreachable!();
}
crate::logging::init();
interrupt::init();
memory::init();
timer::init();
crate::process::init();
unsafe { cpu::start_others(hart_mask); }
crate::kmain();
}
fn others_main() -> ! {
interrupt::init();
memory::init_other();
timer::init();
crate::kmain();
}
/// Constants & macros for `trap.asm`
#[cfg(feature = "m_mode")]
global_asm!("
.equ xstatus, 0x300
.equ xscratch, 0x340
.equ xepc, 0x341
.equ xcause, 0x342
.equ xtval, 0x343
.macro XRET\n mret\n .endm
");
#[cfg(not(feature = "m_mode"))]
global_asm!("
.equ xstatus, 0x100
.equ xscratch, 0x140
.equ xepc, 0x141
.equ xcause, 0x142
.equ xtval, 0x143
.macro XRET\n sret\n .endm
");
#[cfg(feature = "no_bbl")]
global_asm!(include_str!("boot/boot.asm"));
global_asm!(include_str!("boot/entry.asm"));
global_asm!(include_str!("boot/trap.asm"));
/// Some symbols passed from BBL.
/// Used in M-mode kernel.
#[repr(C)]
struct BBLFunctions {
mcall_trap: BBLTrapHandler,
illegal_insn_trap: BBLTrapHandler,
mcall_console_putchar: extern fn(u8),
mcall_console_getchar: extern fn() -> usize,
}
type BBLTrapHandler = extern fn(regs: *const usize, mcause: usize, mepc: usize);
static mut BBL_FUNCTIONS_PTR: *const BBLFunctions = ::core::ptr::null();
use lazy_static::lazy_static;
lazy_static! {
static ref BBL: BBLFunctions = unsafe { BBL_FUNCTIONS_PTR.read() };
}

@@ -1,371 +0,0 @@
use crate::consts::{KERNEL_P2_INDEX, RECURSIVE_INDEX};
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use riscv::addr::*;
use riscv::asm::{sfence_vma, sfence_vma_all};
use riscv::paging::{Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable};
use riscv::paging::{FrameAllocator, FrameDeallocator};
use riscv::register::satp;
use ucore_memory::memory_set::*;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::*;
use log::*;
/*
* @param:
* Frame: page table root frame
* @brief:
* setup page table in the frame
*/
// need 1 page
pub fn setup_page_table(frame: Frame) {
let p2 = unsafe { &mut *(frame.start_address().as_u32() as *mut RvPageTable) };
p2.zero();
p2.set_recursive(RECURSIVE_INDEX, frame.clone());
// Set kernel identity map
// 0x10000000: serial MMIO area (one 4 MiB P2 slot)
p2.map_identity(0x40, EF::VALID | EF::READABLE | EF::WRITABLE);
// 0x80000000: 12 MiB kernel area (three 4 MiB P2 slots)
p2.map_identity(KERNEL_P2_INDEX, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 1, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
p2.map_identity(KERNEL_P2_INDEX + 2, EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE);
use riscv::register::satp;
unsafe { satp::set(satp::Mode::Sv32, 0, frame); }
sfence_vma_all();
info!("setup init page table end");
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageEntry(PageTableEntry);
impl PageTable for ActivePageTable {
type Entry = PageEntry;
/*
* @param:
* addr: the virtual addr to be matched
* target: the physical addr to be matched with addr
* @brief:
* map the virtual address 'addr' to the physical address 'target' in pagetable.
* @retval:
* the matched PageEntry
*/
fn map(&mut self, addr: usize, target: usize) -> &mut PageEntry {
// the flag for the new page entry
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
// `page` wraps the virtual address and `frame` the physical one; both are 4096-byte aligned
let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target as u32));
// map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second)
self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap().flush();
self.get_entry(addr).expect("fail to get entry")
}
/*
* @param:
* addr: virtual address of which the mapped physical frame should be unmapped
* @brief:
* unmap the physical frame mapped at the virtual address
*/
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_frame, flush) = self.0.unmap(page).unwrap();
flush.flush();
}
/*
* @param:
* addr:input virtual address
* @brief:
* get the pageEntry of 'addr'
* @retval:
* a mutable PageEntry reference of 'addr'
*/
fn get_entry(&mut self, addr: usize) -> Option<&mut PageEntry> {
if unsafe { !(*ROOT_PAGE_TABLE)[addr >> 22].flags().contains(EF::VALID) } {
return None;
}
let page = Page::of_addr(VirtAddr::new(addr));
// ???
let _ = self.0.translate_page(page);
let entry_addr = ((addr >> 10) & ((1 << 22) - 4)) | (RECURSIVE_INDEX << 22);
unsafe { Some(&mut *(entry_addr as *mut PageEntry)) }
}
/*
* @param:
* addr: the input (virtual) address
* @brief:
* get the addr's memory page slice
* @retval:
* a mutable reference slice of 'addr' 's page
*/
fn get_page_slice_mut<'a, 'b>(&'a mut self, addr: usize) -> &'b mut [u8] {
use core::slice;
unsafe { slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
/*
* @param:
* addr: virtual address
* @brief:
* get the address's content
* @retval:
* the content(u8) of 'addr'
*/
fn read(&mut self, addr: usize) -> u8 {
unsafe { *(addr as *const u8) }
}
/*
* @param:
* addr: virtual address
* @brief:
* write the address's content
*/
fn write(&mut self, addr: usize, data: u8) {
unsafe { *(addr as *mut u8) = data; }
}
}
// the virtual address of the root page table under the recursive mapping
const ROOT_PAGE_TABLE: *mut RvPageTable =
(((RECURSIVE_INDEX << 10) | (RECURSIVE_INDEX + 1)) << 12) as *mut RvPageTable;
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap())
}
/*
* @param:
* frame: the target physical frame which will be temporarily mapped
* f: the function you would like to apply for once
* @brief:
* run `f` once with the target physical frame temporarily mapped
*/
fn with_temporary_map(&mut self, frame: &Frame, f: impl FnOnce(&mut ActivePageTable, &mut RvPageTable)) {
// Create a temporary page
let page = Page::of_addr(VirtAddr::new(0xcafebabe));
assert!(self.0.translate_page(page).is_none(), "temporary page is already mapped");
// Map it to table
self.map(page.start_address().as_usize(), frame.start_address().as_u32() as usize);
// Call f
let table = unsafe { &mut *(page.start_address().as_usize() as *mut _) };
f(self, table);
// Unmap the page
self.unmap(0xcafebabe);
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
fn update(&mut self) {
let addr = VirtAddr::new((self as *const _ as usize) << 10);
sfence_vma(0, addr);
}
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
fn present(&self) -> bool { self.0.flags().contains(EF::VALID | EF::READABLE) }
fn clear_accessed(&mut self) { self.as_flags().remove(EF::ACCESSED); }
fn clear_dirty(&mut self) { self.as_flags().remove(EF::DIRTY); }
fn set_writable(&mut self, value: bool) { self.as_flags().set(EF::WRITABLE, value); }
fn set_present(&mut self, value: bool) { self.as_flags().set(EF::VALID | EF::READABLE, value); }
fn target(&self) -> usize { self.0.addr().as_u32() as usize }
fn set_target(&mut self, target: usize) {
let flags = self.0.flags();
let frame = Frame::of_addr(PhysAddr::new(target as u32));
self.0.set(frame, flags);
}
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED2) }
fn set_shared(&mut self, writable: bool) {
let flags = self.as_flags();
flags.set(EF::RESERVED1, writable);
flags.set(EF::RESERVED2, !writable);
}
fn clear_shared(&mut self) { self.as_flags().remove(EF::RESERVED1 | EF::RESERVED2); }
fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn set_swapped(&mut self, value: bool) { self.as_flags().set(EF::RESERVED1, value); }
fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
fn set_user(&mut self, value: bool) { self.as_flags().set(EF::USER, value); }
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
fn set_execute(&mut self, value: bool) { self.as_flags().set(EF::EXECUTABLE, value); }
fn mmio(&self) -> bool { unimplemented!() }
fn set_mmio(&mut self, value: bool) { unimplemented!() }
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
p2_frame: Frame,
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
/*
* @brief:
* get a new page table (for a new process or thread)
* @retval:
* the new page table
*/
fn new() -> Self {
let mut pt = Self::new_bare();
pt.map_kernel();
pt
}
/*
* @brief:
* allocate a new frame, set up its recursive self-mapping, and treat it as the inactive page table
* @retval:
* the inactive page table
*/
fn new_bare() -> Self {
let frame = Self::alloc_frame().map(|target| Frame::of_addr(PhysAddr::new(target as u32)))
.expect("failed to allocate frame");
active_table().with_temporary_map(&frame, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { p2_frame: frame }
}
/*
* @param:
* f: a function to do something with the temporary modified activate page table
* @brief:
* temporarily install this inactive table's P2 frame in the recursive slot of the active P2, then apply f to the modified active page table
*/
fn edit(&mut self, f: impl FnOnce(&mut Self::Active)) {
active_table().with_temporary_map(&satp::read().frame(), |active_table, p2_table: &mut RvPageTable| {
let backup = p2_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
p2_table[RECURSIVE_INDEX].set(self.p2_frame.clone(), EF::VALID);
sfence_vma_all();
// execute f in the new context
f(active_table);
// restore recursive mapping to original p2 table
p2_table[RECURSIVE_INDEX] = backup;
sfence_vma_all();
});
}
/*
* @brief:
* activate self as the current page table
*/
unsafe fn activate(&self) {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
}
/*
* @param:
* f: the function to run when temporarily activate self as current page table
* @brief:
* temporarily activate self, run f under it, and return f's result
* @retval:
* the return value of f
*/
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_frame = satp::read().frame();
let new_frame = self.p2_frame.clone();
debug!("switch table {:x?} -> {:x?}", old_frame, new_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, new_frame);
sfence_vma_all();
}
let target = f();
debug!("switch table {:x?} -> {:x?}", new_frame, old_frame);
if old_frame != new_frame {
satp::set(satp::Mode::Sv32, 0, old_frame);
sfence_vma_all();
}
target
}
/*
* @brief:
* get self's token: the satp value for this page table (Sv32 mode bit | root frame number)
* @retval:
* self token
*/
fn token(&self) -> usize {
self.p2_frame.number() | (1 << 31) // as satp
}
fn alloc_frame() -> Option<usize> {
alloc_frame()
}
fn dealloc_frame(target: usize) {
dealloc_frame(target)
}
}
impl InactivePageTable0 {
/*
* @brief:
* copy the kernel mappings (P2 entries) from the current active page table into this new inactive one
*/
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e0 = table[0x40];
let e1 = table[KERNEL_P2_INDEX];
assert!(!e1.is_unused());
// for larger heap memory
let e2 = table[KERNEL_P2_INDEX + 1];
assert!(!e2.is_unused());
let e3 = table[KERNEL_P2_INDEX + 2];
assert!(!e3.is_unused());
self.edit(|_| {
table[0x40] = e0;
table[KERNEL_P2_INDEX].set(e1.frame(), EF::VALID | EF::GLOBAL);
// for larger heap memory
table[KERNEL_P2_INDEX + 1].set(e2.frame(), EF::VALID | EF::GLOBAL);
table[KERNEL_P2_INDEX + 2].set(e3.frame(), EF::VALID | EF::GLOBAL);
});
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
Self::dealloc_frame(self.p2_frame.start_address().as_u32() as usize);
}
}
struct FrameAllocatorForRiscv;
impl FrameAllocator for FrameAllocatorForRiscv {
fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(PhysAddr::new(addr as u32)))
}
}
impl FrameDeallocator for FrameAllocatorForRiscv {
fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_u32() as usize);
}
}

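get_entry's address computation is the recursive-mapping trick: with P2 slot RECURSIVE_INDEX pointing back at the P2 table itself, shifting a virtual address right by 10 turns its P2 index into a P1 index under the recursive window, and the mask keeps 4-byte PTE alignment. In isolation:

// Hedged sketch: virtual address of the P1 entry for `addr` under the
// recursive mapping (Sv32, RECURSIVE_INDEX = 0x3fe from consts.rs).
const RECURSIVE_INDEX: usize = 0x3fe;

fn pte_vaddr(addr: usize) -> usize {
    // the high 10 bits select the recursive slot; `addr >> 10` supplies
    // the old P2 index as a P1 index and the old P1 index as a word offset
    (RECURSIVE_INDEX << 22) | ((addr >> 10) & ((1 << 22) - 4))
}
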
@@ -1,67 +0,0 @@
use riscv::register::*;
use bbl::sbi;
use log::*;
/*
* @brief:
* read the 64-bit cycle counter on a 64-bit CPU
*/
#[cfg(target_pointer_width = "64")]
pub fn get_cycle() -> u64 {
time::read() as u64
}
/*
* @brief:
* read the 64-bit cycle counter on a 32-bit CPU (read timeh/time and retry if timeh rolled over between the reads)
*/
#[cfg(target_pointer_width = "32")]
pub fn get_cycle() -> u64 {
loop {
let hi = timeh::read();
let lo = time::read();
let tmp = timeh::read();
if hi == tmp {
return ((hi as u64) << 32) | (lo as u64);
}
}
}
/*
* @brief:
* enable supervisor timer interrupt and set next timer interrupt
*/
pub fn init() {
// Enable supervisor timer interrupt
#[cfg(feature = "m_mode")]
unsafe { mie::set_mtimer(); }
#[cfg(not(feature = "m_mode"))]
unsafe { sie::set_stimer(); }
set_next();
info!("timer: init end");
}
/*
* @brief:
* set the next timer interrupt
*/
pub fn set_next() {
// 100Hz @ QEMU
let timebase = 250000;
set_timer(get_cycle() + timebase);
}
/*
* @brief:
* set time for timer interrupt
*/
fn set_timer(t: u64) {
#[cfg(feature = "no_bbl")]
unsafe {
asm!("csrw 0x321, $0; csrw 0x322, $1"
: : "r"(t as u32), "r"((t >> 32) as u32) : : "volatile");
}
#[cfg(not(feature = "no_bbl"))]
sbi::set_timer(t);
}