commit 1b4edf3bb2

Binary file not shown.
@@ -1,14 +1,19 @@
 .section .text.entry
 .globl _start
 _start:
-    lui sp, %hi(bootstacktop)
-    addi sp, sp, %lo(bootstacktop)
+    add t0, a0, 1
+    slli t0, t0, 16
+    lui sp, %hi(bootstack)
+    addi sp, sp, %lo(bootstack)
+    add sp, sp, t0

     call rust_main

 .section .bss
 .align 12 #PGSHIFT
 .global bootstack
 bootstack:
-    .space 4096 * 16 #KSTACKSIZE
+    .space 4096 * 16 * 8
 .global bootstacktop
 bootstacktop:
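Why the entry change: on RISC-V every hart enters _start with its hart ID in a0, so a single shared bootstacktop no longer works once several harts boot at once. The new sequence computes t0 = (hartid + 1) << 16 and adds it to bootstack, giving each hart a private 64 KB (4096 * 16) slice of the enlarged 512 KB region, with sp starting at the top of that slice. A minimal Rust sketch of the same arithmetic (names are illustrative, not from the commit):

// Sketch of the stack math performed by the new _start, assuming the
// 8-hart layout implied by `.space 4096 * 16 * 8`.
const PER_HART_STACK: usize = 1 << 16; // 64 KB, matches `slli t0, t0, 16`

fn boot_sp(bootstack: usize, hartid: usize) -> usize {
    // Hart N owns [bootstack + N * 64K, bootstack + (N + 1) * 64K);
    // sp begins at the top of that slice, since stacks grow downward.
    bootstack + (hartid + 1) * PER_HART_STACK
}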
@@ -0,0 +1,49 @@
+// http://llvm.org/docs/Atomics.html#libcalls-atomic
+
+char __atomic_load_1(char *src) {
+    char res = 0;
+    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
+    return res;
+}
+
+short __atomic_load_2(short *src) {
+    short res = 0;
+    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
+    return res;
+}
+
+int __atomic_load_4(int *src) {
+    int res = 0;
+    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
+    return res;
+}
+
+void __atomic_store_1(char *dst, char val) {
+    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
+}
+
+void __atomic_store_4(int *dst, int val) {
+    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
+}
+
+char __atomic_compare_exchange_1(char* dst, char* expected, char desired) {
+    char val = 0;
+    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
+    if (val == *expected) {
+        int sc_ret = 0;
+        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
+        return sc_ret == 0; // sc.w writes 0 on success
+    }
+    return 0;
+}
+
+char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
+    int val = 0;
+    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
+    if (val == *expected) {
+        int sc_ret = 0;
+        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
+        return sc_ret == 0; // sc.w writes 0 on success
+    }
+    return 0;
+}
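These functions provide the libcall symbols LLVM falls back to when it does not emit native atomic instructions (see the linked Atomics doc). Two caveats worth flagging: the 1- and 2-byte loads reuse the 32-bit amoadd, and the compare-exchange splits lr.w and sc.w across two asm statements, so the compiler may schedule memory accesses between them and silently kill the reservation. A hedged sketch, not part of the commit, of a compare-exchange that keeps the whole retry loop in one asm statement, using the same pre-1.0 asm! syntax as the Rust side of this commit:

// Sketch: LR/SC retry loop in a single asm block so nothing can be
// scheduled between the reservation and the conditional store.
unsafe fn cas_u32(dst: *mut u32, expected: u32, desired: u32) -> bool {
    let loaded: u32;
    let _sc: u32;
    asm!("1: lr.w $0, ($2)
          bne $0, $3, 2f
          sc.w $1, $4, ($2)
          bnez $1, 1b
          2:"
         : "=&r"(loaded), "=&r"(_sc)
         : "r"(dst), "r"(expected), "r"(desired)
         : "memory"
         : "volatile");
    // The loop can only exit with loaded == expected via a successful sc.w.
    loaded == expected
}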
@@ -0,0 +1,14 @@
+// Physical address available on THINPAD:
+// [0x80000000, 0x80800000]
+const P2_SIZE: usize = 1 << 22;
+const P2_MASK: usize = 0x3ff << 22;
+pub const RECURSIVE_PAGE_PML4: usize = 0x3fe;
+pub const KERNEL_OFFSET: usize = 0;
+pub const KERNEL_PML4: usize = 0x8000_0000 >> 22;
+pub const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
+pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
+pub const MEMORY_OFFSET: usize = 0x8000_0000;
+pub const MEMORY_END: usize = 0x8080_0000;
+pub const USER_STACK_OFFSET: usize = 0x70000000;
+pub const USER_STACK_SIZE: usize = 0x10000;
+pub const USER32_STACK_OFFSET: usize = USER_STACK_OFFSET;
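A quick way to convince yourself this layout is consistent: the 2 MB heap starting at 0x8020_0000 ends at 0x8040_0000, inside the [0x8000_0000, 0x8080_0000] THINPAD range, and with 4 MB megapages (P2_SIZE = 1 << 22) the kernel's second-level index is 0x8000_0000 >> 22 = 512. An illustrative check, not in the commit:

// Illustrative sanity checks over the constants above.
fn main() {
    const KERNEL_HEAP_OFFSET: usize = 0x8020_0000;
    const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
    const MEMORY_END: usize = 0x8080_0000;

    // Heap [0x8020_0000, 0x8040_0000) stays below MEMORY_END.
    assert!(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE <= MEMORY_END);
    // Kernel starts at second-level page-table index 512.
    assert_eq!(0x8000_0000usize >> 22, 512);
}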
@@ -0,0 +1,31 @@
+use consts::MAX_CPU_NUM;
+use core::ptr::{read_volatile, write_volatile};
+use memory::*;
+
+static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
+
+pub unsafe fn set_cpu_id(cpu_id: usize) {
+    asm!("mv tp, $0" : : "r"(cpu_id));
+}
+
+pub unsafe fn get_cpu_id() -> usize {
+    let mut cpu_id = 0;
+    // "=r" marks cpu_id as an output; the original passed it in the input
+    // position, which would have always returned 0.
+    asm!("mv $0, tp" : "=r"(cpu_id));
+    cpu_id
+}
+
+pub unsafe fn has_started(cpu_id: usize) -> bool {
+    read_volatile(&STARTED[cpu_id])
+}
+
+pub unsafe fn start_others(hart_mask: usize) {
+    for cpu_id in 0..MAX_CPU_NUM {
+        if (hart_mask >> cpu_id) & 1 != 0 {
+            write_volatile(&mut STARTED[cpu_id], true);
+        }
+    }
+}
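The handshake these helpers imply: the boot hart flips each secondary's STARTED flag via start_others, and each secondary spins on has_started before continuing into the kernel. A hedged sketch of a caller; the surrounding function names are illustrative, not from the commit:

// Sketch of the boot-hart / secondary-hart handshake.
unsafe fn boot_hart(hartid: usize, hart_mask: usize) {
    set_cpu_id(hartid);       // keep our ID in tp for get_cpu_id()
    // ... set up memory, heap, interrupts ...
    start_others(hart_mask);  // release every hart named in the mask
}

unsafe fn secondary_hart(hartid: usize) {
    set_cpu_id(hartid);
    while !has_started(hartid) { /* spin until released */ }
    // ... per-hart initialization ...
}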
@ -0,0 +1,97 @@
|
|||||||
|
// Copy from Redox consts.rs:
|
||||||
|
|
||||||
|
// Because the memory map is so important to not be aliased, it is defined here, in one place
|
||||||
|
// The lower 256 PML4 entries are reserved for userspace
|
||||||
|
// Each PML4 entry references up to 512 GB of memory
|
||||||
|
// The top (511) PML4 is reserved for recursive mapping
|
||||||
|
// The second from the top (510) PML4 is reserved for the kernel
|
||||||
|
/// The size of a single PML4
|
||||||
|
pub const PML4_SIZE: usize = 0x0000_0080_0000_0000;
|
||||||
|
pub const PML4_MASK: usize = 0x0000_ff80_0000_0000;
|
||||||
|
|
||||||
|
/// Offset of recursive paging
|
||||||
|
pub const RECURSIVE_PAGE_OFFSET: usize = (-(PML4_SIZE as isize)) as usize;
|
||||||
|
pub const RECURSIVE_PAGE_PML4: usize = (RECURSIVE_PAGE_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset of kernel
|
||||||
|
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
|
||||||
|
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
pub const KERNEL_SIZE: usize = PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to kernel heap
|
||||||
|
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
|
||||||
|
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
/// Size of kernel heap
|
||||||
|
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
|
||||||
|
|
||||||
|
pub const MEMORY_OFFSET: usize = 0;
|
||||||
|
|
||||||
|
/// Offset to kernel percpu variables
|
||||||
|
//TODO: Use 64-bit fs offset to enable this pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
|
||||||
|
pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;
|
||||||
|
/// Size of kernel percpu variables
|
||||||
|
pub const KERNEL_PERCPU_SIZE: usize = 64 * 1024; // 64 KB
|
||||||
|
|
||||||
|
/// Offset to user image
|
||||||
|
pub const USER_OFFSET: usize = 0;
|
||||||
|
pub const USER_PML4: usize = (USER_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user TCB
|
||||||
|
pub const USER_TCB_OFFSET: usize = 0xB000_0000;
|
||||||
|
|
||||||
|
/// Offset to user arguments
|
||||||
|
pub const USER_ARG_OFFSET: usize = USER_OFFSET + PML4_SIZE / 2;
|
||||||
|
|
||||||
|
/// Offset to user heap
|
||||||
|
pub const USER_HEAP_OFFSET: usize = USER_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_HEAP_PML4: usize = (USER_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user grants
|
||||||
|
pub const USER_GRANT_OFFSET: usize = USER_HEAP_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_GRANT_PML4: usize = (USER_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user stack
|
||||||
|
pub const USER_STACK_OFFSET: usize = USER_GRANT_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER32_STACK_OFFSET: usize = 0xB000_0000;
|
||||||
|
pub const USER_STACK_PML4: usize = (USER_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
/// Size of user stack
|
||||||
|
pub const USER_STACK_SIZE: usize = 1024 * 1024; // 1 MB
|
||||||
|
|
||||||
|
/// Offset to user sigstack
|
||||||
|
pub const USER_SIGSTACK_OFFSET: usize = USER_STACK_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_SIGSTACK_PML4: usize = (USER_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
/// Size of user sigstack
|
||||||
|
pub const USER_SIGSTACK_SIZE: usize = 256 * 1024; // 256 KB
|
||||||
|
|
||||||
|
/// Offset to user TLS
|
||||||
|
pub const USER_TLS_OFFSET: usize = USER_SIGSTACK_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TLS_PML4: usize = (USER_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary image (used when cloning)
|
||||||
|
pub const USER_TMP_OFFSET: usize = USER_TLS_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_PML4: usize = (USER_TMP_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary heap (used when cloning)
|
||||||
|
pub const USER_TMP_HEAP_OFFSET: usize = USER_TMP_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_HEAP_PML4: usize = (USER_TMP_HEAP_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary page for grants
|
||||||
|
pub const USER_TMP_GRANT_OFFSET: usize = USER_TMP_HEAP_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_GRANT_PML4: usize = (USER_TMP_GRANT_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary stack (used when cloning)
|
||||||
|
pub const USER_TMP_STACK_OFFSET: usize = USER_TMP_GRANT_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_STACK_PML4: usize = (USER_TMP_STACK_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary sigstack (used when cloning)
|
||||||
|
pub const USER_TMP_SIGSTACK_OFFSET: usize = USER_TMP_STACK_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_SIGSTACK_PML4: usize = (USER_TMP_SIGSTACK_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset to user temporary tls (used when cloning)
|
||||||
|
pub const USER_TMP_TLS_OFFSET: usize = USER_TMP_SIGSTACK_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_TLS_PML4: usize = (USER_TMP_TLS_OFFSET & PML4_MASK) / PML4_SIZE;
|
||||||
|
|
||||||
|
/// Offset for usage in other temporary pages
|
||||||
|
pub const USER_TMP_MISC_OFFSET: usize = USER_TMP_TLS_OFFSET + PML4_SIZE;
|
||||||
|
pub const USER_TMP_MISC_PML4: usize = (USER_TMP_MISC_OFFSET & PML4_MASK) / PML4_SIZE;
|
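The slot numbers in the comment block (511 recursive, 510 kernel) fall straight out of the arithmetic: PML4_SIZE is 2^39, so -PML4_SIZE wraps to 0xffff_ff80_0000_0000, whose masked index is 511, and one PML4 below that is 510. A worked check, illustrative and runnable on any 64-bit host:

// Worked check of the PML4 index arithmetic used above.
fn main() {
    const PML4_SIZE: usize = 0x0000_0080_0000_0000; // 2^39
    const PML4_MASK: usize = 0x0000_ff80_0000_0000;

    let recursive = (-(PML4_SIZE as isize)) as usize; // 0xffff_ff80_0000_0000
    assert_eq!((recursive & PML4_MASK) / PML4_SIZE, 511); // top slot
    let kernel = recursive - PML4_SIZE;
    assert_eq!((kernel & PML4_MASK) / PML4_SIZE, 510); // second from top
}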
@@ -1,128 +1,6 @@
 #![allow(dead_code)]

-#[cfg(target_arch = "riscv32")]
-pub use self::riscv::*;
-#[cfg(target_arch = "x86_64")]
-pub use self::x86_64::*;
+pub use arch::consts::*;

 pub const MAX_CPU_NUM: usize = 8;
 pub const MAX_PROCESS_NUM: usize = 48;
-
-#[cfg(target_arch = "riscv32")]
-mod riscv {
-    // ... riscv32 constants removed here; they are identical to the
-    // new per-arch consts file added above ...
-}
-
-#[cfg(target_arch = "x86_64")]
-mod x86_64 {
-    // ... x86_64 constants removed here; they are identical to the
-    // new per-arch consts file added above ...
-}
@@ -0,0 +1,4 @@
+pub struct cpu {
+    pub id: usize
+}
+
@@ -0,0 +1,44 @@
+//! RISCV atomic is not currently supported by Rust.
+//! This is an ugly workaround.
+
+use core::cell::UnsafeCell;
+
+extern {
+    fn __atomic_load_4(src: *const u32) -> u32;
+    fn __atomic_store_4(dst: *mut u32, val: u32);
+    fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
+}
+
+pub struct AtomicLock {
+    lock: UnsafeCell<u32>,
+}
+
+impl AtomicLock {
+    pub fn new() -> Self {
+        AtomicLock {
+            lock: UnsafeCell::new(0),
+        }
+    }
+
+    /// Returns true if the lock was acquired (the cell went 0 -> 1)
+    pub fn try_lock(&self) -> bool {
+        let mut expected: u32 = 0;
+        unsafe {
+            __atomic_compare_exchange_4(self.lock.get(), &mut expected as *mut u32, 1)
+        }
+    }
+
+    pub fn load(&self) -> bool {
+        unsafe { __atomic_load_4(self.lock.get()) == 1 }
+    }
+
+    /// Clears the cell (stores 0), releasing the lock
+    pub fn store(&self) {
+        unsafe { __atomic_store_4(self.lock.get(), 0) }
+    }
+}
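Usage-wise this is a bare test-and-set cell: try_lock CASes 0 -> 1 through the libcalls above, and store writes 0 back. A small illustrative spin wrapper, not in the commit:

// Illustrative spin acquire/release over AtomicLock.
fn spin_acquire(lock: &AtomicLock) {
    // Retry until the CAS moves the cell from 0 (free) to 1 (held).
    while !lock.try_lock() {}
}

fn release(lock: &AtomicLock) {
    lock.store(); // writes 0, releasing the lock
}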
@@ -0,0 +1,31 @@
+use core::sync::atomic::{AtomicBool, Ordering};
+
+pub struct AtomicLock {
+    lock: AtomicBool,
+}
+
+impl AtomicLock {
+    pub fn new() -> AtomicLock {
+        AtomicLock {
+            lock: AtomicBool::new(false),
+        }
+    }
+
+    /// Returns true if the lock was acquired (previous value was false)
+    pub fn try_lock(&self) -> bool {
+        self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
+    }
+
+    pub fn load(&self) -> bool {
+        self.lock.load(Ordering::Relaxed)
+    }
+
+    /// Releases the lock
+    pub fn store(&self) {
+        self.lock.store(false, Ordering::Release);
+    }
+}
+
+pub const ATOMIC_LOCK_INIT: AtomicLock = AtomicLock {
+    lock: AtomicBool::new(false),
+};
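Here try_lock succeeds exactly when compare_and_swap reports the previous value was false. On later Rust versions compare_and_swap is deprecated in favor of compare_exchange, which makes the success and failure orderings explicit; an equivalent sketch:

use core::sync::atomic::{AtomicBool, Ordering};

// Equivalent try_lock with the newer compare_exchange API (illustrative).
fn try_lock(lock: &AtomicBool) -> bool {
    lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_ok()
}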