Add MIPS target.

Branch: master
Author: Yuhao Zhou · 6 years ago
Parent: e4df6d6788
Commit: a78916c57e

kernel/Cargo.lock (generated, 7 lines added)

@@ -227,6 +227,11 @@ name = "managed"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "mips"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "nodrop"
version = "0.1.13"
@@ -309,6 +314,7 @@ dependencies = [
"isomorphic_drivers 0.1.0 (git+https://github.com/rcore-os/isomorphic_drivers)",
"lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mips 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"once 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"pc-keyboard 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -607,6 +613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
"checksum managed 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fdcec5e97041c7f0f1c5b7d93f12e57293c831c646f4cc7a5db59460c7ea8de6"
"checksum mips 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f79c54c0ff7e933ffe5b3ec7c3c05037b654c334b0c98e66536d2d8906435394"
"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945"
"checksum once 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "931fb7a4cf34610cf6cbe58d52a8ca5ef4c726d4e2e178abd0dc13a6551c6d73"
"checksum os_bootinfo 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "66481dbeb5e773e7bd85b63cd6042c30786f834338288c5ec4f3742673db360a"

@@ -69,6 +69,9 @@ riscv = { git = "https://github.com/rcore-os/riscv", features = ["inline-asm"] }
aarch64 = { git = "https://github.com/rcore-os/aarch64" }
bcm2837 = { git = "https://github.com/rcore-os/bcm2837", optional = true }
[target.'cfg(target_arch = "mips")'.dependencies]
mips = "^0.1.0"
[package.metadata.bootimage]
default-target = "targets/x86_64.json"
output = "target/x86_64/bootimage.bin"
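The new mips dependency is only pulled in when compiling for the MIPS target. For reference, a minimal sketch of how code can be gated on the same condition (illustrative only, not part of this commit; the custom target JSON later in the diff sets "arch": "mips", so the matching cfg key is target_arch = "mips"):

// Illustrative sketch: compile-time gating on the MIPS target.
#[cfg(target_arch = "mips")]
fn arch_name() -> &'static str {
    "mips (little-endian, mips32r2)"
}

#[cfg(not(target_arch = "mips"))]
fn arch_name() -> &'static str {
    "not mips"
}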

@@ -15,7 +15,7 @@
# make clean Clean
#
# Options:
# arch = x86_64 | riscv32 | riscv64 | aarch64
# arch = x86_64 | riscv32 | riscv64 | aarch64 | mipsel
# d = int | in_asm | ... QEMU debug info
# mode = debug | release
# LOG = off | error | warn | info | debug | trace
@@ -155,6 +155,8 @@ else ifeq ($(arch), riscv32)
prefix := riscv64-unknown-elf-
else ifeq ($(arch), riscv64)
prefix := riscv64-unknown-elf-
else ifeq ($(arch), mipsel)
prefix ?= mipsel-linux-gnu-
else ifeq ($(arch), aarch64)
prefix ?= aarch64-none-elf-
ifeq (,$(shell which $(prefix)ld))
@@ -251,6 +253,9 @@ else ifeq ($(arch), riscv64)
cp bbl $(abspath $@)
else ifeq ($(arch), aarch64)
@$(objcopy) $(bootloader) --strip-all -O binary $@
else ifeq ($(arch), mipsel)
# TODO write mipsel bootloader
false
endif
kernel:
@@ -271,6 +276,8 @@ else ifeq ($(arch), riscv64)
@cargo xbuild $(build_args)
else ifeq ($(arch), aarch64)
@cargo xbuild $(build_args)
else ifeq ($(arch), mipsel)
@cargo xbuild $(build_args)
endif

@@ -0,0 +1,72 @@
--- atomic_backup.rs 2018-10-06 19:59:14.000000000 +0800
+++ atomic.rs 2018-10-26 14:34:31.000000000 +0800
@@ -125,6 +125,9 @@
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct AtomicBool {
+ #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+ v: UnsafeCell<u32>,
+ #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
v: UnsafeCell<u8>,
}
@@ -265,6 +268,59 @@
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
#[cfg(target_has_atomic = "8")]
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+impl AtomicBool {
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub const fn new(v: bool) -> AtomicBool {
+ AtomicBool { v: UnsafeCell::new(v as u32) }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn load(&self, order: Ordering) -> bool {
+ unsafe { atomic_load(self.v.get(), order) != 0 }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn store(&self, val: bool, order: Ordering) {
+ unsafe { atomic_store(self.v.get(), val as u32, order); }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+ match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
+ Ok(x) => x,
+ Err(x) => x,
+ }
+ }
+
+ ///
+ #[inline]
+ #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
+ pub fn compare_exchange(&self,
+ current: bool,
+ new: bool,
+ success: Ordering,
+ failure: Ordering)
+ -> Result<bool, bool> {
+ match unsafe {
+ atomic_compare_exchange(self.v.get(), current as u32, new as u32, success, failure)
+ } {
+ Ok(x) => Ok(x != 0),
+ Err(x) => Err(x != 0),
+ }
+ }
+}
+
+#[cfg(target_has_atomic = "8")]
+#[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
impl AtomicBool {
/// Creates a new `AtomicBool`.
///
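The patch above widens AtomicBool's backing store to u32 on RISC-V targets, since the RV32/RV64 atomic instructions (LR/SC and AMO) only operate on 32- or 64-bit words, while the public API stays unchanged. A small usage sketch of that unchanged interface (illustrative, era-appropriate core API, not part of the patch):

use core::sync::atomic::{AtomicBool, Ordering};

// Call sites look the same whether the backing store is u8 or u32.
static INITIALIZED: AtomicBool = AtomicBool::new(false);

fn claim_init() -> bool {
    // compare_and_swap returns the previous value; the claim succeeded
    // if it was still `false`.
    !INITIALIZED.compare_and_swap(false, true, Ordering::SeqCst)
}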

@@ -0,0 +1,49 @@
/* Copy from bbl-ucore : https://ring00.github.io/bbl-ucore */
/* Simple linker script for the ucore kernel.
See the GNU ld 'info' manual ("info ld") to learn the syntax. */
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0xffffffffc0020000;
SECTIONS
{
/* Load the kernel at this address: "." means the current address */
. = BASE_ADDRESS;
start = .;
.text : {
stext = .;
*(.text.entry)
*(.text .text.*)
. = ALIGN(4K);
etext = .;
}
.rodata : {
srodata = .;
*(.rodata .rodata.*)
. = ALIGN(4K);
erodata = .;
}
.data : {
sdata = .;
*(.data .data.*)
edata = .;
}
.stack : {
*(.bss.stack)
}
.bss : {
sbss = .;
*(.bss .bss.*)
ebss = .;
}
PROVIDE(end = .);
}

@@ -0,0 +1,18 @@
use super::consts::KERNEL_OFFSET;
/// Mask all external interrupts except serial.
pub unsafe fn init_external_interrupt() {
const HART1_S_MODE_INTERRUPT_ENABLES: *mut u64 = (KERNEL_OFFSET + 0x0C00_2100) as *mut u64;
const SERIAL: u64 = 4;
HART1_S_MODE_INTERRUPT_ENABLES.write(1 << SERIAL);
}
/// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() {
const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = (KERNEL_OFFSET + 0x0C20_2004) as *mut u32;
// claim
let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read();
// complete
HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.write(source);
}
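The two hard-coded offsets follow the standard SiFive PLIC register layout: interrupt-enable words start at base + 0x2000 with a 0x80 stride per context, and the claim/complete register sits at base + 0x200004 with a 0x1000 stride; on the FU540, hart 1 S-mode is context 2. A hedged sketch of the general computation (constants assumed from the PLIC spec, not taken from this commit):

// Illustrative helpers; a context is a (hart, privilege) pair, hart 1 S-mode = 2.
const PLIC_BASE: usize = 0x0C00_0000;

fn plic_enable_base(context: usize) -> usize {
    // 0x0C00_2100 for context 2, matching HART1_S_MODE_INTERRUPT_ENABLES above.
    PLIC_BASE + 0x2000 + context * 0x80
}

fn plic_claim_complete(context: usize) -> usize {
    // 0x0C20_2004 for context 2, matching the claim/complete register above.
    PLIC_BASE + 0x20_0004 + context * 0x1000
}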

@@ -0,0 +1,19 @@
.section .text.entry
.globl _start
_start:
add t0, a0, 1
slli t0, t0, 16
lui sp, %hi(bootstack)
addi sp, sp, %lo(bootstack)
add sp, sp, t0
call rust_main
.section .bss.stack
.align 12 #PGSHIFT
.global bootstack
bootstack:
.space 4096 * 16 * 8
.global bootstacktop
bootstacktop:
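Each hart computes its own boot stack top from the hart id passed in a0: sp = bootstack + (hartid + 1) * 0x10000, i.e. 64 KiB per hart out of the 4096 * 16 * 8 bytes reserved below. The same computation in Rust, for clarity (illustrative only, not part of the commit):

// Mirrors the entry.asm arithmetic: (a0 + 1) << 16 added to bootstack.
const PER_HART_STACK: usize = 4096 * 16; // 64 KiB, i.e. 1 << 16
const MAX_HARTS: usize = 8;

fn boot_stack_top(bootstack: usize, hartid: usize) -> usize {
    assert!(hartid < MAX_HARTS);
    bootstack + (hartid + 1) * PER_HART_STACK
}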

@@ -0,0 +1,54 @@
/* Copy from bbl-ucore : https://ring00.github.io/bbl-ucore */
/* Simple linker script for the ucore kernel.
See the GNU ld 'info' manual ("info ld") to learn the syntax. */
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0xC0020000;
SECTIONS
{
. = 0xC0000000;
.boot : {
KEEP(*(.text.boot))
}
/* Load the kernel at this address: "." means the current address */
. = BASE_ADDRESS;
start = .;
.text : {
stext = .;
*(.text.entry)
*(.text .text.*)
. = ALIGN(4K);
etext = .;
}
.rodata : {
srodata = .;
*(.rodata .rodata.*)
. = ALIGN(4K);
erodata = .;
}
.data : {
sdata = .;
*(.data .data.*)
edata = .;
}
.stack : {
*(.bss.stack)
}
.bss : {
sbss = .;
*(.bss .bss.*)
ebss = .;
}
PROVIDE(end = .);
}

@@ -0,0 +1,49 @@
/* Copy from bbl-ucore : https://ring00.github.io/bbl-ucore */
/* Simple linker script for the ucore kernel.
See the GNU ld 'info' manual ("info ld") to learn the syntax. */
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0xffffffffc0020000;
SECTIONS
{
/* Load the kernel at this address: "." means the current address */
. = BASE_ADDRESS;
start = .;
.text : {
stext = .;
*(.text.entry)
*(.text .text.*)
. = ALIGN(4K);
etext = .;
}
.rodata : {
srodata = .;
*(.rodata .rodata.*)
. = ALIGN(4K);
erodata = .;
}
.data : {
sdata = .;
*(.data .data.*)
edata = .;
}
.stack : {
*(.bss.stack)
}
.bss : {
sbss = .;
*(.bss .bss.*)
ebss = .;
}
PROVIDE(end = .);
}

@@ -0,0 +1,129 @@
# Constants / Macros defined in Rust code:
# XLENB
# LOAD
# STORE
# TEST_BACK_TO_KERNEL
.macro SAVE_ALL
# If coming from userspace, preserve the user stack pointer and load
# the kernel stack pointer. If we came from the kernel, sscratch
# will contain 0, and we should continue on the current stack.
csrrw sp, sscratch, sp
bnez sp, trap_from_user
trap_from_kernel:
csrr sp, sscratch
STORE gp, -1
# sscratch = previous-sp, sp = kernel-sp
trap_from_user:
# provide room for trap frame
addi sp, sp, -37 * XLENB
# save x registers except x2 (sp)
STORE x1, 1
STORE x3, 3
STORE x4, 4
STORE x5, 5
STORE x6, 6
STORE x7, 7
STORE x8, 8
STORE x9, 9
STORE x10, 10
STORE x11, 11
STORE x12, 12
STORE x13, 13
STORE x14, 14
STORE x15, 15
STORE x16, 16
STORE x17, 17
STORE x18, 18
STORE x19, 19
STORE x20, 20
STORE x21, 21
STORE x22, 22
STORE x23, 23
STORE x24, 24
STORE x25, 25
STORE x26, 26
STORE x27, 27
STORE x28, 28
STORE x29, 29
STORE x30, 30
STORE x31, 31
# load hartid to gp from sp[36]
LOAD gp, 36
# get sp, sstatus, sepc, stval, scause
# set sscratch = 0
csrrw s0, sscratch, x0
csrr s1, sstatus
csrr s2, sepc
csrr s3, stval
csrr s4, scause
# store sp, sstatus, sepc, sbadvaddr, scause
STORE s0, 2
STORE s1, 32
STORE s2, 33
STORE s3, 34
STORE s4, 35
.endm
.macro RESTORE_ALL
LOAD s1, 32 # s1 = sstatus
LOAD s2, 33 # s2 = sepc
andi s0, s1, 1 << 8 # sstatus.SPP = 1
bnez s0, _to_kernel # s0 = back to kernel?
_to_user:
addi s0, sp, 37*XLENB
csrw sscratch, s0 # sscratch = kernel-sp
STORE gp, 36 # store hartid from gp to sp[36]
_to_kernel:
# restore sstatus, sepc
csrw sstatus, s1
csrw sepc, s2
# restore x registers except x2 (sp)
LOAD x1, 1
LOAD x3, 3
LOAD x4, 4
LOAD x5, 5
LOAD x6, 6
LOAD x7, 7
LOAD x8, 8
LOAD x9, 9
LOAD x10, 10
LOAD x11, 11
LOAD x12, 12
LOAD x13, 13
LOAD x14, 14
LOAD x15, 15
LOAD x16, 16
LOAD x17, 17
LOAD x18, 18
LOAD x19, 19
LOAD x20, 20
LOAD x21, 21
LOAD x22, 22
LOAD x23, 23
LOAD x24, 24
LOAD x25, 25
LOAD x26, 26
LOAD x27, 27
LOAD x28, 28
LOAD x29, 29
LOAD x30, 30
LOAD x31, 31
# restore sp last
LOAD x2, 2
.endm
.section .text
.globl trap_entry
trap_entry:
SAVE_ALL
mv a0, sp
jal rust_trap
.globl trap_return
trap_return:
RESTORE_ALL
# return from supervisor call
sret
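SAVE_ALL reserves 37 * XLENB bytes, which has to match the #[repr(C)] TrapFrame defined later in this commit: x0-x31 in slots 0-31 (sp goes into slot 2 via sscratch), sstatus in 32, sepc in 33, stval in 34, scause in 35, and the hart id in 36. A self-contained mirror of that layout, for reference only (the real TrapFrame uses the riscv crate's Sstatus/Scause wrappers, which each hold a single usize):

// Illustrative mirror of the frame built by SAVE_ALL, one usize per slot.
#[allow(dead_code)]
#[repr(C)]
struct FrameLayout {
    x: [usize; 32],  // slots 0..=31, sp saved into slot 2
    sstatus: usize,  // slot 32
    sepc: usize,     // slot 33
    stval: usize,    // slot 34
    scause: usize,   // slot 35
    hartid: usize,   // slot 36
}

#[allow(dead_code)]
const FRAME_SLOTS: usize = 37; // == size_of::<FrameLayout>() / XLENB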

@@ -0,0 +1,8 @@
//! Workaround for missing compiler-builtin symbols
//!
//! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic)
#[no_mangle]
pub extern fn abort() {
panic!("abort");
}

@@ -0,0 +1,42 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
#[cfg(target_arch = "riscv32")]
pub const RECURSIVE_INDEX: usize = 0x3fd;
#[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0o774;
// Under riscv64, upon booting, paging is enabled by bbl and
// root_table[0777] maps to p3_table,
// and p3_table[0777] maps to gigapage 8000_0000H,
// so 0xFFFF_FFFF_8000_0000 maps to 0x8000_0000
// root_table[0774] points to root_table itself as page table
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
#[cfg(target_arch = "riscv32")]
pub const KERNEL_OFFSET: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_OFFSET: usize = 0xFFFF_FFFF_C000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_P2_INDEX: usize = (KERNEL_OFFSET >> 12 >> 10) & 0x3ff;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_P4_INDEX: usize = (KERNEL_OFFSET >> 12 >> 9 >> 9 >> 9) & 0o777;
pub const KERNEL_HEAP_SIZE: usize = 0x00a0_0000;
#[cfg(target_arch = "riscv32")]
pub const MEMORY_OFFSET: usize = 0x8000_0000;
#[cfg(target_arch = "riscv64")]
pub const MEMORY_OFFSET: usize = 0x8000_0000;
#[cfg(target_arch = "riscv32")]
pub const MEMORY_END: usize = 0x8100_0000;
#[cfg(target_arch = "riscv64")]
pub const MEMORY_END: usize = 0x8100_0000;
// FIXME: rv64 `sh` and `ls` will crash if stack top > 0x80000000 ???
pub const USER_STACK_OFFSET: usize = 0x80000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = 0xC0000000 - USER_STACK_SIZE;
pub const MAX_DTB_SIZE: usize = 0x2000;
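For reference, the index arithmetic above selects the expected top-level page-directory slots in both modes; a quick illustrative check (u64 literals so the rv64 constant fits on any host):

// Illustrative check of KERNEL_P2_INDEX / KERNEL_P4_INDEX, not part of the commit.
fn _check_kernel_indices() {
    // rv32: 0xC000_0000 >> 22, masked to 10 bits -> entry 768 of 1024.
    assert_eq!((0xC000_0000u64 >> 12 >> 10) & 0x3ff, 768);
    // rv64: 0xFFFF_FFFF_C000_0000 >> 39, masked to 9 bits -> entry 0o777 (the last).
    assert_eq!((0xFFFF_FFFF_C000_0000u64 >> 12 >> 9 >> 9 >> 9) & 0o777, 0o777);
}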

@@ -0,0 +1,273 @@
use riscv::register::{
sstatus,
sstatus::Sstatus,
scause::Scause,
};
/// Saved registers on a trap.
#[derive(Clone)]
#[repr(C)]
pub struct TrapFrame {
/// General registers
pub x: [usize; 32],
/// Supervisor Status
pub sstatus: Sstatus,
/// Supervisor Exception Program Counter
pub sepc: usize,
/// Supervisor Trap Value
pub stval: usize,
/// Supervisor Cause
pub scause: Scause,
/// Reserve space for hartid
pub _hartid: usize,
}
impl TrapFrame {
/// Constructs TrapFrame for a new kernel thread.
///
/// The new thread starts at function `entry` with a usize argument `arg`.
/// The stack pointer will be set to `sp`.
fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, sp: usize) -> Self {
use core::mem::zeroed;
let mut tf: Self = unsafe { zeroed() };
tf.x[10] = arg; // a0
tf.x[2] = sp;
tf.sepc = entry as usize;
tf.sstatus = sstatus::read();
tf.sstatus.set_spie(true);
tf.sstatus.set_sie(false);
tf.sstatus.set_spp(sstatus::SPP::Supervisor);
tf
}
/// Constructs TrapFrame for a new user thread.
///
/// The new thread starts at `entry_addr`.
/// The stack pointer will be set to `sp`.
fn new_user_thread(entry_addr: usize, sp: usize) -> Self {
use core::mem::zeroed;
let mut tf: Self = unsafe { zeroed() };
tf.x[2] = sp;
tf.sepc = entry_addr;
tf.sstatus = sstatus::read();
tf.sstatus.set_spie(true);
tf.sstatus.set_sie(false);
tf.sstatus.set_spp(sstatus::SPP::User);
tf
}
}
use core::fmt::{Debug, Formatter, Error};
impl Debug for TrapFrame {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
struct Regs<'a>(&'a [usize; 32]);
impl<'a> Debug for Regs<'a> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
const REG_NAME: [&str; 32] = [
"zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
"s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
"s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
"t3", "t4", "t5", "t6"];
f.debug_map().entries(REG_NAME.iter().zip(self.0)).finish()
}
}
f.debug_struct("TrapFrame")
.field("regs", &Regs(&self.x))
.field("sstatus", &self.sstatus)
.field("sepc", &self.sepc)
.field("stval", &self.stval)
.field("scause", &self.scause.cause())
.finish()
}
}
/// Kernel stack contents for a new thread
#[derive(Debug)]
#[repr(C)]
pub struct InitStack {
context: ContextData,
tf: TrapFrame,
}
impl InitStack {
/// Push this InitStack onto the stack at `stack_top` and return the resulting Context.
unsafe fn push_at(self, stack_top: usize) -> Context {
let ptr = (stack_top as *mut Self).sub(1); //real kernel stack top
*ptr = self;
Context { sp: ptr as usize }
}
}
extern {
fn trap_return();
}
/// Saved registers for kernel context switches.
#[derive(Debug, Default)]
#[repr(C)]
struct ContextData {
/// Return address
ra: usize,
/// Page table token
satp: usize,
/// Callee-saved registers
s: [usize; 12],
}
impl ContextData {
fn new(satp: usize) -> Self {
ContextData { ra: trap_return as usize, satp, ..ContextData::default() }
}
}
/// Context of a kernel thread.
#[derive(Debug)]
#[repr(C)]
pub struct Context {
/// The stack pointer of the suspended thread.
/// A `ContextData` is stored here.
sp: usize,
}
impl Context {
/// Switch to another kernel thread.
///
/// Push all callee-saved registers at the current kernel stack.
/// Store current sp, switch to target.
/// Pop all callee-saved registers, then return to the target.
#[naked]
#[inline(never)]
pub unsafe extern fn switch(&mut self, _target: &mut Self) {
#[cfg(target_arch = "riscv32")]
asm!(r"
.equ XLENB, 4
.macro Load reg, mem
lw \reg, \mem
.endm
.macro Store reg, mem
sw \reg, \mem
.endm");
#[cfg(target_arch = "riscv64")]
asm!(r"
.equ XLENB, 8
.macro Load reg, mem
ld \reg, \mem
.endm
.macro Store reg, mem
sd \reg, \mem
.endm");
asm!("
// save from's registers
addi sp, sp, (-XLENB*14)
Store sp, 0(a0)
Store ra, 0*XLENB(sp)
Store s0, 2*XLENB(sp)
Store s1, 3*XLENB(sp)
Store s2, 4*XLENB(sp)
Store s3, 5*XLENB(sp)
Store s4, 6*XLENB(sp)
Store s5, 7*XLENB(sp)
Store s6, 8*XLENB(sp)
Store s7, 9*XLENB(sp)
Store s8, 10*XLENB(sp)
Store s9, 11*XLENB(sp)
Store s10, 12*XLENB(sp)
Store s11, 13*XLENB(sp)
csrr s11, satp
Store s11, 1*XLENB(sp)
// restore to's registers
Load sp, 0(a1)
Load s11, 1*XLENB(sp)
csrw satp, s11
Load ra, 0*XLENB(sp)
Load s0, 2*XLENB(sp)
Load s1, 3*XLENB(sp)
Load s2, 4*XLENB(sp)
Load s3, 5*XLENB(sp)
Load s4, 6*XLENB(sp)
Load s5, 7*XLENB(sp)
Load s6, 8*XLENB(sp)
Load s7, 9*XLENB(sp)
Load s8, 10*XLENB(sp)
Load s9, 11*XLENB(sp)
Load s10, 12*XLENB(sp)
Load s11, 13*XLENB(sp)
addi sp, sp, (XLENB*14)
Store zero, 0(a1)
ret"
: : : : "volatile" )
}
/// Constructs a null Context for the current running thread.
pub unsafe fn null() -> Self {
Context { sp: 0 }
}
/// Constructs Context for a new kernel thread.
///
/// The new thread starts at function `entry` with a usize argument `arg`.
/// The stack pointer will be set to `kstack_top`.
/// The SATP register will be set to `satp`.
pub unsafe fn new_kernel_thread(entry: extern fn(usize) -> !, arg: usize, kstack_top: usize, satp: usize) -> Self {
InitStack {
context: ContextData::new(satp),
tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
}.push_at(kstack_top)
}
/// Constructs Context for a new user thread.
///
/// The new thread starts at `entry_addr`.
/// The stack pointer of user and kernel mode will be set to `ustack_top`, `kstack_top`.
/// The SATP register will be set to `satp`.
pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, _is32: bool, satp: usize) -> Self {
InitStack {
context: ContextData::new(satp),
tf: TrapFrame::new_user_thread(entry_addr, ustack_top),
}.push_at(kstack_top)
}
/// Fork a user process and get the new Context.
///
/// The stack pointer in kernel mode will be set to `kstack_top`.
/// The SATP register will be set to `satp`.
/// All other registers are the same as in the original.
pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, satp: usize) -> Self {
InitStack {
context: ContextData::new(satp),
tf: {
let mut tf = tf.clone();
// fork function's ret value, the new process is 0
tf.x[10] = 0; // a0
tf
},
}.push_at(kstack_top)
}
/// Fork a user thread and get the new Context.
///
/// The stack pointer in kernel mode will be set to `kstack_top`.
/// The SATP register will be set to `satp`.
/// The new user stack will be set to `ustack_top`.
/// The new thread pointer will be set to `tls`.
/// All other registers are the same as in the original.
pub unsafe fn new_clone(tf: &TrapFrame, ustack_top: usize, kstack_top: usize, satp: usize, tls: usize) -> Self {
InitStack {
context: ContextData::new(satp),
tf: {
let mut tf = tf.clone();
tf.x[2] = ustack_top; // sp
tf.x[4] = tls; // tp
tf.x[10] = 0; // a0
tf
},
}.push_at(kstack_top)
}
/// Used for getting the init TrapFrame of a new user context in `sys_exec`.
pub unsafe fn get_init_tf(&self) -> TrapFrame {
(*(self.sp as *const InitStack)).tf.clone()
}
}

@@ -0,0 +1,38 @@
use crate::consts::MAX_CPU_NUM;
use core::ptr::{read_volatile, write_volatile};
static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
pub unsafe fn set_cpu_id(cpu_id: usize) {
asm!("mv gp, $0" : : "r"(cpu_id));
}
pub fn id() -> usize {
let cpu_id;
unsafe { asm!("mv $0, gp" : "=r"(cpu_id)); }
cpu_id
}
pub fn send_ipi(cpu_id: usize) {
super::sbi::send_ipi(1 << cpu_id);
}
pub unsafe fn has_started(cpu_id: usize) -> bool {
read_volatile(&STARTED[cpu_id])
}
pub unsafe fn start_others(hart_mask: usize) {
for cpu_id in 0..MAX_CPU_NUM {
if (hart_mask >> cpu_id) & 1 != 0 {
write_volatile(&mut STARTED[cpu_id], true);
}
}
}
pub fn halt() {
unsafe { riscv::asm::wfi() }
}
pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
super::sbi::shutdown()
}

@@ -0,0 +1,127 @@
use mips::interrupts::*;
use crate::drivers::DRIVERS;
pub use self::context::*;
use log::*;
#[path = "context.rs"]
mod context;
/// Initialize interrupt
pub fn init() {
// extern {
// fn trap_entry();
// }
// unsafe {
// // Set sscratch register to 0, indicating to exception vector that we are
// // presently executing in the kernel
// sscratch::write(0);
// // Set the exception vector address
// stvec::write(trap_entry as usize, stvec::TrapMode::Direct);
// // Enable IPI
// sie::set_ssoft();
// // Enable external interrupt
// if super::cpu::id() == super::BOOT_HART_ID {
// sie::set_sext();
// }
// }
// info!("interrupt: init end");
}
/// Enable interrupt
#[inline]
pub unsafe fn enable() {
// sstatus::set_sie();
}
/// Disable interrupt and return current interrupt status
#[inline]
pub unsafe fn disable_and_store() -> usize {
// let e = sstatus::read().sie() as usize;
// sstatus::clear_sie();
// e
// TODO: port interrupt disabling to MIPS; report "already disabled" for now.
0
}
/// Enable interrupt if `flags` != 0
#[inline]
pub unsafe fn restore(flags: usize) {
// if flags != 0 {
// enable();
// }
}
/// Dispatch and handle interrupt.
///
/// This function is called from `trap.asm`.
#[no_mangle]
pub extern fn rust_trap(tf: &mut TrapFrame) {
// use self::scause::{Trap, Interrupt as I, Exception as E};
// trace!("Interrupt @ CPU{}: {:?} ", super::cpu::id(), tf.scause.cause());
// match tf.scause.cause() {
// Trap::Interrupt(I::SupervisorExternal) => external(),
// Trap::Interrupt(I::SupervisorSoft) => ipi(),
// Trap::Interrupt(I::SupervisorTimer) => timer(),
// Trap::Exception(E::UserEnvCall) => syscall(tf),
// Trap::Exception(E::LoadPageFault) => page_fault(tf),
// Trap::Exception(E::StorePageFault) => page_fault(tf),
// Trap::Exception(E::InstructionPageFault) => page_fault(tf),
// _ => crate::trap::error(tf),
// }
// trace!("Interrupt end");
}
fn external() {
#[cfg(feature = "board_u540")]
unsafe { super::board::handle_external_interrupt(); }
// true means handled, false otherwise
let handlers = [try_process_serial, try_process_drivers];
for handler in handlers.iter() {
if handler() == true {
break
}
}
}
fn try_process_serial() -> bool {
match super::io::getchar_option() {
Some(ch) => {
crate::trap::serial(ch);
true
}
None => false
}
}
fn try_process_drivers() -> bool {
for driver in DRIVERS.read().iter() {
if driver.try_handle_interrupt(None) == true {
return true
}
}
return false
}
fn ipi() {
debug!("IPI");
super::sbi::clear_ipi();
}
fn timer() {
super::timer::set_next();
crate::trap::timer();
}
fn syscall(tf: &mut TrapFrame) {
tf.sepc += 4; // Must be advanced before handling the syscall: fork copies this trap frame, and the child must resume after the ecall.
let ret = crate::syscall::syscall(tf.x[17], [tf.x[10], tf.x[11], tf.x[12], tf.x[13], tf.x[14], tf.x[15]], tf);
tf.x[10] = ret as usize;
}
fn page_fault(tf: &mut TrapFrame) {
let addr = tf.stval;
trace!("\nEXCEPTION: Page Fault @ {:#x}", addr);
if !crate::memory::handle_page_fault(addr) {
crate::trap::error(tf);
}
}

@@ -0,0 +1,52 @@
use core::fmt::{Write, Result, Arguments};
use super::sbi;
struct SerialPort;
impl Write for SerialPort {
fn write_str(&mut self, s: &str) -> Result {
for c in s.bytes() {
if c == 127 {
putchar(8);
putchar(b' ');
putchar(8);
} else {
putchar(c);
}
}
Ok(())
}
}
fn putchar(c: u8) {
if cfg!(feature = "board_u540") {
if c == b'\n' {
sbi::console_putchar(b'\r' as usize);
}
}
sbi::console_putchar(c as usize);
}
pub fn getchar() -> char {
let c = sbi::console_getchar() as u8;
match c {
255 => '\0', // null
c => c as char,
}
}
pub fn getchar_option() -> Option<char> {
let c = sbi::console_getchar() as isize;
match c {
-1 => None,
c => Some(c as u8 as char),
}
}
pub fn putfmt(fmt: Arguments) {
SerialPort.write_fmt(fmt).unwrap();
}
const TXDATA: *mut u32 = 0x38000000 as *mut u32;
const RXDATA: *mut u32 = 0x38000004 as *mut u32;
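TXDATA and RXDATA are declared above but not yet used; output currently goes through SBI. A hedged sketch of what a direct MMIO path over those registers could look like (the busy/full bit and register semantics are assumptions borrowed from the SiFive UART layout, not taken from this commit):

// Hypothetical MMIO transmit path; bit 31 as the "FIFO full" flag is an assumption.
#[allow(dead_code)]
fn putchar_mmio(c: u8) {
    unsafe {
        // Busy-wait until the transmit FIFO has room, then push the byte.
        while TXDATA.read_volatile() & (1 << 31) != 0 {}
        TXDATA.write_volatile(c as u32);
    }
}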

@@ -0,0 +1,93 @@
use core::mem;
use riscv::{addr::*, register::sstatus};
use rcore_memory::PAGE_SIZE;
use log::*;
use crate::memory::{FRAME_ALLOCATOR, init_heap, MemoryAttr, MemorySet, Linear};
use crate::consts::{MEMORY_OFFSET, MEMORY_END, KERNEL_OFFSET};
use riscv::register::satp;
/// Initialize the memory management module
pub fn init(dtb: usize) {
unsafe { sstatus::set_sum(); } // Allow user memory access
// initialize heap and Frame allocator
init_frame_allocator();
init_heap();
// remap the kernel use 4K page
remap_the_kernel(dtb);
}
pub fn init_other() {
unsafe {
sstatus::set_sum(); // Allow user memory access
asm!("csrw satp, $0; sfence.vma" :: "r"(SATP) :: "volatile");
}
}
fn init_frame_allocator() {
use bit_allocator::BitAlloc;
use core::ops::Range;
let mut ba = FRAME_ALLOCATOR.lock();
let range = to_range((end as usize) - KERNEL_OFFSET + MEMORY_OFFSET + PAGE_SIZE, MEMORY_END);
ba.insert(range);
info!("frame allocator: init end");
/// Transform memory area `[start, end)` to integer range for `FrameAllocator`
fn to_range(start: usize, end: usize) -> Range<usize> {
let page_start = (start - MEMORY_OFFSET) / PAGE_SIZE;
let page_end = (end - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
assert!(page_start < page_end, "illegal range for frame allocator");
page_start..page_end
}
}
/// Remap the kernel memory address with 4K page recorded in p1 page table
fn remap_the_kernel(dtb: usize) {
let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
let mut ms = MemorySet::new_bare();
ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(offset), "text");
ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(offset), "data");
ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(offset), "rodata");
ms.push(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), Linear::new(offset), "stack");
ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(offset), "bss");
ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, MemoryAttr::default().readonly(), Linear::new(offset), "dts");
// map PLIC for HiFiveU
let offset = -(KERNEL_OFFSET as isize);
ms.push(KERNEL_OFFSET + 0x0C00_2000, KERNEL_OFFSET + 0x0C00_2000 + PAGE_SIZE, MemoryAttr::default(), Linear::new(offset), "plic0");
ms.push(KERNEL_OFFSET + 0x0C20_2000, KERNEL_OFFSET + 0x0C20_2000 + PAGE_SIZE, MemoryAttr::default(), Linear::new(offset), "plic1");
unsafe { ms.activate(); }
unsafe { SATP = ms.token(); }
mem::forget(ms);
info!("remap kernel end");
}
// First core stores its SATP here.
// Other cores load it later.
static mut SATP: usize = 0;
pub unsafe fn clear_bss() {
let start = sbss as usize;
let end = ebss as usize;
let step = core::mem::size_of::<usize>();
for i in (start..end).step_by(step) {
(i as *mut usize).write(0);
}
}
// Symbols provided by linker script
#[allow(dead_code)]
extern {
fn stext();
fn etext();
fn sdata();
fn edata();
fn srodata();
fn erodata();
fn sbss();
fn ebss();
fn start();
fn end();
fn bootstack();
fn bootstacktop();
}
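init_frame_allocator hands the allocator every frame between the page after the kernel's end symbol and MEMORY_END. With MEMORY_OFFSET = 0x8000_0000 and MEMORY_END = 0x8100_0000 that is at most 16 MiB, i.e. 4096 frames; a quick illustrative check of the to_range arithmetic (not part of the commit):

// Illustrative check of the frame-index upper bound computed by to_range.
fn _check_frame_range_upper_bound() {
    const MEMORY_OFFSET: usize = 0x8000_0000;
    const MEMORY_END: usize = 0x8100_0000;
    const PAGE_SIZE: usize = 4096;
    let page_end = (MEMORY_END - MEMORY_OFFSET - 1) / PAGE_SIZE + 1;
    assert_eq!(page_end, 4096);
}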

@@ -0,0 +1,88 @@
pub mod io;
pub mod interrupt;
pub mod timer;
pub mod paging;
pub mod memory;
pub mod compiler_rt;
pub mod consts;
pub mod cpu;
pub mod syscall;
pub mod rand;
#[cfg(feature = "board_u540")]
#[path = "board/u540/mod.rs"]
mod board;
mod sbi;
use log::*;
#[no_mangle]
pub extern fn rust_main(hartid: usize, dtb: usize, hart_mask: usize) -> ! {
// An initial recursive page table has been set by BBL (shared by all cores)
unsafe { cpu::set_cpu_id(hartid); }
if hartid != BOOT_HART_ID {
while unsafe { !cpu::has_started(hartid) } { }
println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb);
others_main();
// others_main() never returns
}
unsafe { memory::clear_bss(); }
println!("Hello RISCV! in hart {}, dtb @ {:#x}", hartid, dtb);
crate::logging::init();
interrupt::init();
memory::init(dtb);
timer::init();
// FIXME: init driver on u540
#[cfg(not(feature = "board_u540"))]
crate::drivers::init(dtb);
#[cfg(feature = "board_u540")]
unsafe { board::init_external_interrupt(); }
crate::process::init();
unsafe { cpu::start_others(hart_mask); }
crate::kmain();
}
fn others_main() -> ! {
interrupt::init();
memory::init_other();
timer::init();
crate::kmain();
}
#[cfg(not(feature = "board_u540"))]
const BOOT_HART_ID: usize = 0;
#[cfg(feature = "board_u540")]
const BOOT_HART_ID: usize = 1;
/// Constant & Macro for `trap.asm`
#[cfg(target_arch = "riscv32")]
global_asm!(r"
.equ XLENB, 4
.equ XLENb, 32
.macro LOAD a1, a2
lw \a1, \a2*XLENB(sp)
.endm
.macro STORE a1, a2
sw \a1, \a2*XLENB(sp)
.endm
");
#[cfg(target_arch = "riscv64")]
global_asm!(r"
.equ XLENB, 8
.equ XLENb, 64
.macro LOAD a1, a2
ld \a1, \a2*XLENB(sp)
.endm
.macro STORE a1, a2
sd \a1, \a2*XLENB(sp)
.endm
");
global_asm!(include_str!("boot/entry.asm"));
global_asm!(include_str!("boot/trap.asm"));

@@ -0,0 +1,253 @@
use crate::consts::RECURSIVE_INDEX;
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use riscv::addr::*;
use riscv::asm::{sfence_vma, sfence_vma_all};
use riscv::paging::{Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, RecursivePageTable, PageTableType};
use riscv::paging::{FrameAllocator, FrameDeallocator};
use riscv::register::satp;
use rcore_memory::paging::*;
use log::*;
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
/// PageTableEntry: the contents of this entry.
/// Page: this entry is the pte of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging::Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second)
self.0.map_to(page, frame, flags, &mut FrameAllocatorForRiscv).unwrap().flush();
self.get_entry(addr).expect("fail to get entry")
}
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.0.unmap(page).unwrap();
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.0.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
} else {
None
}
}
}
impl PageTableExt for ActivePageTable {}
/// The virtual address of root page table
#[cfg(target_arch = "riscv32")]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((RECURSIVE_INDEX << 12 << 10) |
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", feature = "sv39"))]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((0xFFFF_0000_0000_0000) |
(0o777 << 12 << 9 << 9 << 9) |
(RECURSIVE_INDEX << 12 << 9 << 9) |
(RECURSIVE_INDEX << 12 << 9) |
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", not(feature = "sv39")))]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((0xFFFF_0000_0000_0000) |
(RECURSIVE_INDEX << 12 << 9 << 9 << 9) |
(RECURSIVE_INDEX << 12 << 9 << 9) |
(RECURSIVE_INDEX << 12 << 9) |
((RECURSIVE_INDEX+1) << 12)) as *mut RvPageTable;
impl ActivePageTable {
#[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self {
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::uninitialized()
)
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn new() -> Self {
#[cfg(feature = "sv39")]
let type_ = PageTableType::Sv39;
#[cfg(not(feature = "sv39"))]
let type_ = PageTableType::Sv48;
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(),
::core::mem::uninitialized()
)
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
fn update(&mut self) {
unsafe { sfence_vma(0, self.1.start_address().as_usize()); }
}
fn accessed(&self) -> bool { self.0.flags().contains(EF::ACCESSED) }
fn dirty(&self) -> bool { self.0.flags().contains(EF::DIRTY) }
fn writable(&self) -> bool { self.0.flags().contains(EF::WRITABLE) }
fn present(&self) -> bool { self.0.flags().contains(EF::VALID | EF::READABLE) }
fn clear_accessed(&mut self) { self.0.flags_mut().remove(EF::ACCESSED); }
fn clear_dirty(&mut self) { self.0.flags_mut().remove(EF::DIRTY); }
fn set_writable(&mut self, value: bool) { self.0.flags_mut().set(EF::WRITABLE, value); }
fn set_present(&mut self, value: bool) { self.0.flags_mut().set(EF::VALID | EF::READABLE, value); }
fn target(&self) -> usize { self.0.addr().as_usize() }
fn set_target(&mut self, target: usize) {
let flags = self.0.flags();
let frame = Frame::of_addr(PhysAddr::new(target));
self.0.set(frame, flags);
}
fn writable_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn readonly_shared(&self) -> bool { self.0.flags().contains(EF::RESERVED2) }
fn set_shared(&mut self, writable: bool) {
let flags = self.0.flags_mut();
flags.set(EF::RESERVED1, writable);
flags.set(EF::RESERVED2, !writable);
}
fn clear_shared(&mut self) { self.0.flags_mut().remove(EF::RESERVED1 | EF::RESERVED2); }
fn swapped(&self) -> bool { self.0.flags().contains(EF::RESERVED1) }
fn set_swapped(&mut self, value: bool) { self.0.flags_mut().set(EF::RESERVED1, value); }
fn user(&self) -> bool { self.0.flags().contains(EF::USER) }
fn set_user(&mut self, value: bool) { self.0.flags_mut().set(EF::USER, value); }
fn execute(&self) -> bool { self.0.flags().contains(EF::EXECUTABLE) }
fn set_execute(&mut self, value: bool) { self.0.flags_mut().set(EF::EXECUTABLE, value); }
fn mmio(&self) -> u8 { 0 }
fn set_mmio(&mut self, _value: u8) { }
}
#[derive(Debug)]
pub struct InactivePageTable0 {
root_frame: Frame,
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { root_frame: frame }
}
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
extern {
fn start();
fn end();
}
let mut entrys: [PageTableEntry; 16] = unsafe { core::mem::uninitialized() };
let entry_start = start as usize >> 22;
let entry_end = (end as usize >> 22) + 1;
let entry_count = entry_end - entry_start;
for i in 0..entry_count {
entrys[i] = table[entry_start + i];
}
self.edit(|_| {
// NOTE: 'table' now refers to new page table
for i in 0..entry_count {
table[entry_start + i] = entrys[i];
}
});
}
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e1 = table[KERNEL_P4_INDEX];
assert!(!e1.is_unused());
self.edit(|_| {
table[KERNEL_P4_INDEX] = e1;
});
}
#[cfg(target_arch = "riscv32")]
fn token(&self) -> usize {
self.root_frame.number() | (1 << 31) // as satp
}
#[cfg(target_arch = "riscv64")]
fn token(&self) -> usize {
use bit_field::BitField;
let mut satp = self.root_frame.number();
satp.set_bits(44..60, 0); // AS is 0
#[cfg(feature = "sv39")]
satp.set_bits(60..64, satp::Mode::Sv39 as usize);
#[cfg(not(feature = "sv39"))]
satp.set_bits(60..64, satp::Mode::Sv48 as usize);
satp
}
unsafe fn set_token(token: usize) {
asm!("csrw satp, $0" :: "r"(token) :: "volatile");
}
fn active_token() -> usize {
satp::read().bits()
}
fn flush_tlb() {
unsafe { sfence_vma_all(); }
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
unsafe { sfence_vma_all(); }
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
unsafe { sfence_vma_all(); }
ret
})
}
}
impl Drop for InactivePageTable0 {
fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize());
}
}
struct FrameAllocatorForRiscv;
impl FrameAllocator for FrameAllocatorForRiscv {
fn alloc(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(PhysAddr::new(addr)))
}
}
impl FrameDeallocator for FrameAllocatorForRiscv {
fn dealloc(&mut self, frame: Frame) {
dealloc_frame(frame.start_address().as_usize());
}
}
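On Sv32 the ROOT_PAGE_TABLE constant above expands to (0x3fd << 22) | (0x3fe << 12) = 0xFF7F_E000, i.e. the virtual address reached through page-directory slot RECURSIVE_INDEX and page-table slot RECURSIVE_INDEX + 1. A quick arithmetic check (illustrative only, not part of the commit):

// Illustrative check of the Sv32 recursive-mapping address.
fn _check_root_page_table_addr() {
    const RECURSIVE_INDEX: usize = 0x3fd;
    let addr = (RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX + 1) << 12);
    assert_eq!(addr, 0xFF7F_E000);
}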

@@ -0,0 +1,3 @@
pub fn rand() -> u64 {
return 0;
}

@@ -0,0 +1,64 @@
//! Port from sbi.h
#[inline(always)]
fn sbi_call(which: usize, arg0: usize, arg1: usize, arg2: usize) -> usize {
let ret;
unsafe {
asm!("ecall"
: "={x10}" (ret)
: "{x10}" (arg0), "{x11}" (arg1), "{x12}" (arg2), "{x17}" (which)
: "memory"
: "volatile");
}
ret
}
pub fn console_putchar(ch: usize) {
sbi_call(SBI_CONSOLE_PUTCHAR, ch, 0, 0);
}
pub fn console_getchar() -> usize {
sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0)
}
pub fn shutdown() -> ! {
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
unreachable!()
}
pub fn set_timer(stime_value: u64) {
#[cfg(target_pointer_width = "32")]
sbi_call(SBI_SET_TIMER, stime_value as usize, (stime_value >> 32) as usize, 0);
#[cfg(target_pointer_width = "64")]
sbi_call(SBI_SET_TIMER, stime_value as usize, 0, 0);
}
pub fn clear_ipi() {
sbi_call(SBI_CLEAR_IPI, 0, 0, 0);
}
pub fn send_ipi(hart_mask: usize) {
sbi_call(SBI_SEND_IPI, &hart_mask as *const _ as usize, 0, 0);
}
pub fn remote_fence_i(hart_mask: usize) {
sbi_call(SBI_REMOTE_FENCE_I, &hart_mask as *const _ as usize, 0, 0);
}
pub fn remote_sfence_vma(hart_mask: usize, _start: usize, _size: usize) {
sbi_call(SBI_REMOTE_SFENCE_VMA, &hart_mask as *const _ as usize, 0, 0);
}
pub fn remote_sfence_vma_asid(hart_mask: usize, _start: usize, _size: usize, _asid: usize) {
sbi_call(SBI_REMOTE_SFENCE_VMA_ASID, &hart_mask as *const _ as usize, 0, 0);
}
const SBI_SET_TIMER: usize = 0;
const SBI_CONSOLE_PUTCHAR: usize = 1;
const SBI_CONSOLE_GETCHAR: usize = 2;
const SBI_CLEAR_IPI: usize = 3;
const SBI_SEND_IPI: usize = 4;
const SBI_REMOTE_FENCE_I: usize = 5;
const SBI_REMOTE_SFENCE_VMA: usize = 6;
const SBI_REMOTE_SFENCE_VMA_ASID: usize = 7;
const SBI_SHUTDOWN: usize = 8;

@@ -0,0 +1,285 @@
//! RISCV32 syscall ids
//! Reference: https://github.com/riscv/riscv-musl/blob/staging/arch/riscv32/bits/syscall.h.in
pub const SYS_IO_SETUP: usize = 0;
pub const SYS_IO_DESTROY: usize = 1;
pub const SYS_IO_SUBMIT: usize = 2;
pub const SYS_IO_CANCEL: usize = 3;
pub const SYS_IO_GETEVENTS: usize = 4;
pub const SYS_SETXATTR: usize = 5;
pub const SYS_LSETXATTR: usize = 6;
pub const SYS_FSETXATTR: usize = 7;
pub const SYS_GETXATTR: usize = 8;
pub const SYS_LGETXATTR: usize = 9;
pub const SYS_FGETXATTR: usize = 10;
pub const SYS_LISTXATTR: usize = 11;
pub const SYS_LLISTXATTR: usize = 12;
pub const SYS_FLISTXATTR: usize = 13;
pub const SYS_REMOVEXATTR: usize = 14;
pub const SYS_LREMOVEXATTR: usize = 15;
pub const SYS_FREMOVEXATTR: usize = 16;
pub const SYS_GETCWD: usize = 17;
pub const SYS_LOOKUP_DCOOKIE: usize = 18;
pub const SYS_EVENTFD2: usize = 19;
pub const SYS_EPOLL_CREATE1: usize = 20;
pub const SYS_EPOLL_CTL: usize = 21;
pub const SYS_EPOLL_PWAIT: usize = 22;
pub const SYS_DUP: usize = 23;
pub const SYS_DUP3: usize = 24;
pub const SYS_FCNTL: usize = 25;
pub const SYS_INOTIFY_INIT1: usize = 26;
pub const SYS_INOTIFY_ADD_WATCH: usize = 27;
pub const SYS_INOTIFY_RM_WATCH: usize = 28;
pub const SYS_IOCTL: usize = 29;
pub const SYS_IOPRIO_SET: usize = 30;
pub const SYS_IOPRIO_GET: usize = 31;
pub const SYS_FLOCK: usize = 32;
pub const SYS_MKNODAT: usize = 33;
pub const SYS_MKDIRAT: usize = 34;
pub const SYS_UNLINKAT: usize = 35;
pub const SYS_SYMLINKAT: usize = 36;
pub const SYS_LINKAT: usize = 37;
pub const SYS_RENAMEAT: usize = 38; // FIXME
pub const SYS_UMOUNT2: usize = 39;
pub const SYS_MOUNT: usize = 40;
pub const SYS_PIVOT_ROOT: usize = 41;
pub const SYS_NFSSERVCTL: usize = 42;
pub const SYS_STATFS: usize = 43;
pub const SYS_FSTATFS: usize = 44;
pub const SYS_TRUNCATE: usize = 45;
pub const SYS_FTRUNCATE: usize = 46;
pub const SYS_FALLOCATE: usize = 47;
pub const SYS_FACCESSAT: usize = 48;
pub const SYS_CHDIR: usize = 49;
pub const SYS_FCHDIR: usize = 50;
pub const SYS_CHROOT: usize = 51;
pub const SYS_FCHMOD: usize = 52;
pub const SYS_FCHMODAT: usize = 53;
pub const SYS_FCHOWNAT: usize = 54;
pub const SYS_FCHOWN: usize = 55;
pub const SYS_OPENAT: usize = 56;
pub const SYS_CLOSE: usize = 57;
pub const SYS_VHANGUP: usize = 58;
pub const SYS_PIPE2: usize = 59;
pub const SYS_QUOTACTL: usize = 60;
pub const SYS_GETDENTS64: usize = 61;
pub const SYS_LSEEK: usize = 62;
pub const SYS_READ: usize = 63;
pub const SYS_WRITE: usize = 64;
pub const SYS_READV: usize = 65;
pub const SYS_WRITEV: usize = 66;
pub const SYS_PREAD64: usize = 67;
pub const SYS_PWRITE64: usize = 68;
pub const SYS_PREADV: usize = 69;
pub const SYS_PWRITEV: usize = 70;
pub const SYS_SENDFILE: usize = 71;
pub const SYS_PSELECT6: usize = 72;
pub const SYS_PPOLL: usize = 73;
pub const SYS_SIGNALFD4: usize = 74;
pub const SYS_VMSPLICE: usize = 75;
pub const SYS_SPLICE: usize = 76;
pub const SYS_TEE: usize = 77;
pub const SYS_READLINKAT: usize = 78;
pub const SYS_NEWFSTATAT: usize = 79;
pub const SYS_FSTAT: usize = 80;
pub const SYS_SYNC: usize = 81;
pub const SYS_FSYNC: usize = 82;
pub const SYS_FDATASYNC: usize = 83;
pub const SYS_SYNC_FILE_RANGE: usize = 84;
pub const SYS_TIMERFD_CREATE: usize = 85;
pub const SYS_TIMERFD_SETTIME: usize = 86;
pub const SYS_TIMERFD_GETTIME: usize = 87;
pub const SYS_UTIMENSAT: usize = 88;
pub const SYS_ACCT: usize = 89;
pub const SYS_CAPGET: usize = 90;
pub const SYS_CAPSET: usize = 91;
pub const SYS_PERSONALITY: usize = 92;
pub const SYS_EXIT: usize = 93;
pub const SYS_EXIT_GROUP: usize = 94;
pub const SYS_WAITID: usize = 95;
pub const SYS_SET_TID_ADDRESS: usize = 96;
pub const SYS_UNSHARE: usize = 97;
pub const SYS_FUTEX: usize = 98;
pub const SYS_SET_ROBUST_LIST: usize = 99;
pub const SYS_GET_ROBUST_LIST: usize = 100;
pub const SYS_NANOSLEEP: usize = 101;
pub const SYS_GETITIMER: usize = 102;
pub const SYS_SETITIMER: usize = 103;
pub const SYS_KEXEC_LOAD: usize = 104;
pub const SYS_INIT_MODULE: usize = 105;
pub const SYS_DELETE_MODULE: usize = 106;
pub const SYS_TIMER_CREATE: usize = 107;
pub const SYS_TIMER_GETTIME: usize = 108;
pub const SYS_TIMER_GETOVERRUN: usize = 109;
pub const SYS_TIMER_SETTIME: usize = 110;
pub const SYS_TIMER_DELETE: usize = 111;
pub const SYS_CLOCK_SETTIME: usize = 112;
pub const SYS_CLOCK_GETTIME: usize = 113;
pub const SYS_CLOCK_GETRES: usize = 114;
pub const SYS_CLOCK_NANOSLEEP: usize = 115;
pub const SYS_SYSLOG: usize = 116;
pub const SYS_PTRACE: usize = 117;
pub const SYS_SCHED_SETPARAM: usize = 118;
pub const SYS_SCHED_SETSCHEDULER: usize = 119;
pub const SYS_SCHED_GETSCHEDULER: usize = 120;
pub const SYS_SCHED_GETPARAM: usize = 121;
pub const SYS_SCHED_SETAFFINITY: usize = 122;
pub const SYS_SCHED_GETAFFINITY: usize = 123;
pub const SYS_SCHED_YIELD: usize = 124;
pub const SYS_SCHED_GET_PRIORITY_MAX: usize = 125;
pub const SYS_SCHED_GET_PRIORITY_MIN: usize = 126;
pub const SYS_SCHED_RR_GET_INTERVAL: usize = 127;
pub const SYS_RESTART_SYSCALL: usize = 128;
pub const SYS_KILL: usize = 129;
pub const SYS_TKILL: usize = 130;
pub const SYS_TGKILL: usize = 131;
pub const SYS_SIGALTSTACK: usize = 132;
pub const SYS_RT_SIGSUSPEND: usize = 133;
pub const SYS_RT_SIGACTION: usize = 134;
pub const SYS_RT_SIGPROCMASK: usize = 135;
pub const SYS_RT_SIGPENDING: usize = 136;
pub const SYS_RT_SIGTIMEDWAIT: usize = 137;
pub const SYS_RT_SIGQUEUEINFO: usize = 138;
pub const SYS_RT_SIGRETURN: usize = 139;
pub const SYS_SETPRIORITY: usize = 140;
pub const SYS_GETPRIORITY: usize = 141;
pub const SYS_REBOOT: usize = 142;
pub const SYS_SETREGID: usize = 143;
pub const SYS_SETGID: usize = 144;
pub const SYS_SETREUID: usize = 145;
pub const SYS_SETUID: usize = 146;
pub const SYS_SETRESUID: usize = 147;
pub const SYS_GETRESUID: usize = 148;
pub const SYS_SETRESGID: usize = 149;
pub const SYS_GETRESGID: usize = 150;
pub const SYS_SETFSUID: usize = 151;
pub const SYS_SETFSGID: usize = 152;
pub const SYS_TIMES: usize = 153;
pub const SYS_SETPGID: usize = 154;
pub const SYS_GETPGID: usize = 155;
pub const SYS_GETSID: usize = 156;
pub const SYS_SETSID: usize = 157;
pub const SYS_GETGROUPS: usize = 158;
pub const SYS_SETGROUPS: usize = 159;
pub const SYS_UNAME: usize = 160;
pub const SYS_SETHOSTNAME: usize = 161;
pub const SYS_SETDOMAINNAME: usize = 162;
pub const SYS_GETRLIMIT: usize = 163;
pub const SYS_SETRLIMIT: usize = 164;
pub const SYS_GETRUSAGE: usize = 165;
pub const SYS_UMASK: usize = 166;
pub const SYS_PRCTL: usize = 167;
pub const SYS_GETCPU: usize = 168;
pub const SYS_GETTIMEOFDAY: usize = 169;
pub const SYS_SETTIMEOFDAY: usize = 170;
pub const SYS_ADJTIMEX: usize = 171;
pub const SYS_GETPID: usize = 172;
pub const SYS_GETPPID: usize = 173;
pub const SYS_GETUID: usize = 174;
pub const SYS_GETEUID: usize = 175;
pub const SYS_GETGID: usize = 176;
pub const SYS_GETEGID: usize = 177;
pub const SYS_GETTID: usize = 178;
pub const SYS_SYSINFO: usize = 179;
pub const SYS_MQ_OPEN: usize = 180;
pub const SYS_MQ_UNLINK: usize = 181;
pub const SYS_MQ_TIMEDSEND: usize = 182;
pub const SYS_MQ_TIMEDRECEIVE: usize = 183;
pub const SYS_MQ_NOTIFY: usize = 184;
pub const SYS_MQ_GETSETATTR: usize = 185;
pub const SYS_MSGGET: usize = 186;
pub const SYS_MSGCTL: usize = 187;
pub const SYS_MSGRCV: usize = 188;
pub const SYS_MSGSND: usize = 189;
pub const SYS_SEMGET: usize = 190;
pub const SYS_SEMCTL: usize = 191;
pub const SYS_SEMTIMEDOP: usize = 192;
pub const SYS_SEMOP: usize = 193;
pub const SYS_SHMGET: usize = 194;
pub const SYS_SHMCTL: usize = 195;
pub const SYS_SHMAT: usize = 196;
pub const SYS_SHMDT: usize = 197;
pub const SYS_SOCKET: usize = 198;
pub const SYS_SOCKETPAIR: usize = 199;
pub const SYS_BIND: usize = 200;
pub const SYS_LISTEN: usize = 201;
pub const SYS_ACCEPT: usize = 202;
pub const SYS_CONNECT: usize = 203;
pub const SYS_GETSOCKNAME: usize = 204;
pub const SYS_GETPEERNAME: usize = 205;
pub const SYS_SENDTO: usize = 206;
pub const SYS_RECVFROM: usize = 207;
pub const SYS_SETSOCKOPT: usize = 208;
pub const SYS_GETSOCKOPT: usize = 209;
pub const SYS_SHUTDOWN: usize = 210;
pub const SYS_SENDMSG: usize = 211;
pub const SYS_RECVMSG: usize = 212;
pub const SYS_READAHEAD: usize = 213;
pub const SYS_BRK: usize = 214;
pub const SYS_MUNMAP: usize = 215;
pub const SYS_MREMAP: usize = 216;
pub const SYS_ADD_KEY: usize = 217;
pub const SYS_REQUEST_KEY: usize = 218;
pub const SYS_KEYCTL: usize = 219;
pub const SYS_CLONE: usize = 220;
pub const SYS_EXECVE: usize = 221;
pub const SYS_MMAP: usize = 222;
pub const SYS_FADVISE64: usize = 223;
pub const SYS_SWAPON: usize = 224;
pub const SYS_SWAPOFF: usize = 225;
pub const SYS_MPROTECT: usize = 226;
pub const SYS_MSYNC: usize = 227;
pub const SYS_MLOCK: usize = 228;
pub const SYS_MUNLOCK: usize = 229;
pub const SYS_MLOCKALL: usize = 230;
pub const SYS_MUNLOCKALL: usize = 231;
pub const SYS_MINCORE: usize = 232;
pub const SYS_MADVISE: usize = 233;
pub const SYS_REMAP_FILE_PAGES: usize = 234;
pub const SYS_MBIND: usize = 235;
pub const SYS_GET_MEMPOLICY: usize = 236;
pub const SYS_SET_MEMPOLICY: usize = 237;
pub const SYS_MIGRATE_PAGES: usize = 238;
pub const SYS_MOVE_PAGES: usize = 239;
pub const SYS_RT_TGSIGQUEUEINFO: usize = 240;
pub const SYS_PERF_EVENT_OPEN: usize = 241;
pub const SYS_ACCEPT4: usize = 242;
pub const SYS_RECVMMSG: usize = 243;
pub const SYS_ARCH_SPECIFIC_SYSCALL: usize = 244;
pub const SYS_WAIT4: usize = 260;
pub const SYS_PRLIMIT64: usize = 261;
pub const SYS_FANOTIFY_INIT: usize = 262;
pub const SYS_FANOTIFY_MARK: usize = 263;
pub const SYS_NAME_TO_HANDLE_AT: usize = 264;
pub const SYS_OPEN_BY_HANDLE_AT: usize = 265;
pub const SYS_CLOCK_ADJTIME: usize = 266;
pub const SYS_SYNCFS: usize = 267;
pub const SYS_SETNS: usize = 268;
pub const SYS_SENDMMSG: usize = 269;
pub const SYS_PROCESS_VM_READV: usize = 270;
pub const SYS_PROCESS_VM_WRITEV: usize = 271;
pub const SYS_KCMP: usize = 272;
pub const SYS_FINIT_MODULE: usize = 273;
pub const SYS_SCHED_SETATTR: usize = 274;
pub const SYS_SCHED_GETATTR: usize = 275;
pub const SYS_RENAMEAT2: usize = 276;
pub const SYS_SECCOMP: usize = 277;
pub const SYS_GETRANDOM: usize = 278;
pub const SYS_MEMFD_CREATE: usize = 279;
pub const SYS_BPF: usize = 280;
pub const SYS_EXECVEAT: usize = 281;
pub const SYS_USERFAULTFD: usize = 282;
pub const SYS_MEMBARRIER: usize = 283;
pub const SYS_MLOCK2: usize = 284;
pub const SYS_COPY_FILE_RANGE: usize = 285;
pub const SYS_PREADV2: usize = 286;
pub const SYS_PWRITEV2: usize = 287;
pub const SYS_PKEY_MPROTECT: usize = 288;
pub const SYS_PKEY_ALLOC: usize = 289;
pub const SYS_PKEY_FREE: usize = 290;
pub const SYS_SYSRISCV: usize = SYS_ARCH_SPECIFIC_SYSCALL;
pub const SYS_RISCV_FLUSH_ICACHE: usize = SYS_SYSRISCV + 15;
// custom temporary syscall
pub const SYS_MAP_PCI_DEVICE: usize = 999;
pub const SYS_GET_PADDR: usize = 998;

@@ -0,0 +1,40 @@
use riscv::register::*;
use super::sbi;
use log::*;
#[cfg(target_pointer_width = "64")]
pub fn get_cycle() -> u64 {
time::read() as u64
}
#[cfg(target_pointer_width = "32")]
pub fn get_cycle() -> u64 {
loop {
let hi = timeh::read();
let lo = time::read();
let tmp = timeh::read();
if hi == tmp {
return ((hi as u64) << 32) | (lo as u64);
}
}
}
pub fn read_epoch() -> u64 {
// TODO: support RTC
0
}
/// Enable timer interrupt
pub fn init() {
// Enable supervisor timer interrupt
unsafe { sie::set_stimer(); }
set_next();
info!("timer: init end");
}
/// Set the next timer interrupt
pub fn set_next() {
// 100Hz @ QEMU
let timebase = 250000;
sbi::set_timer(get_cycle() + timebase);
}

@@ -0,0 +1,36 @@
{
"arch": "mips",
"cpu": "mips32r2",
"llvm-target": "mipsel-unknown-none",
"data-layout": "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64",
"target-endian": "little",
"target-pointer-width": "32",
"target-c-int-width": "32",
"os": "none",
"features": "+mips32r2,+soft-float",
"max-atomic-width": "32",
"linker": "rust-lld",
"linker-flavor": "ld.lld",
"pre-link-args": {
"ld.lld": [
"-Tsrc/arch/mipsel/boot/linker.ld"
]
},
"executables": true,
"panic-strategy": "abort",
"relocation-model": "static",
"abi-blacklist": [
"cdecl",
"stdcall",
"fastcall",
"vectorcall",
"thiscall",
"aapcs",
"win64",
"sysv64",
"ptx-kernel",
"msp430-interrupt",
"x86-interrupt"
],
"eliminate-frame-pointer": false
}