added workaround for atomic ops

toolchain_update
maoyuchaxue 6 years ago
parent cfda03a0f2
commit f7b7b1bcd6

@@ -77,6 +77,7 @@ endif
ld := $(prefix)ld
objdump := $(prefix)objdump
cc := $(prefix)gcc
+CC := $(cc)
as := $(prefix)as
.PHONY: all clean run build asm doc justrun kernel

@@ -12,6 +12,12 @@ fn main() {
        // .compile("cobj");
        gen_vector_asm().unwrap();
    }
+    if std::env::var("TARGET").unwrap().find("riscv32").is_some() {
+        cc::Build::new()
+            .file("src/arch/riscv32/compiler_rt.c")
+            .flag("-march=rv32ima")
+            .compile("atomic_rt");
+    }
}
fn gen_vector_asm() -> Result<()> { fn gen_vector_asm() -> Result<()> {

@@ -0,0 +1,57 @@
// Corresponding Rust declarations (see compiler_rt.rs):
// fn __atomic_load_1_workaround(src: *const u8) -> u8;
// fn __atomic_load_2_workaround(src: *const u16) -> u16;
// fn __atomic_load_4_workaround(src: *const u32) -> u32;
// fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
// fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
// fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
// fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;

// Atomic loads: amoadd.w with zero as the addend returns the old value and writes it back unchanged.
char __atomic_load_1_workaround(char *src) {
    char res = 0;
    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
    return res;
}

short __atomic_load_2_workaround(short *src) {
    short res = 0;
    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
    return res;
}

int __atomic_load_4_workaround(int *src) {
    int res = 0;
    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
    return res;
}

// Atomic stores: amoswap.w writes the new value and discards the old one (rd = zero).
void __atomic_store_1_workaround(char *dst, char val) {
    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
}

void __atomic_store_4_workaround(int *dst, int val) {
    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
}

// Compare-and-exchange via an lr.w/sc.w pair. Returns 1 on success, 0 otherwise.
// Note: unlike the standard __atomic_compare_exchange, *expected is not updated on
// failure, and a spurious sc.w failure is reported as an ordinary failure; callers
// are expected to retry.
char __atomic_compare_exchange_1_workaround(char* dst, char* expected, char desired) {
    char val = 0;
    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
    if (val == *expected) {
        int sc_ret = 0;
        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
        return sc_ret == 0;
    }
    return 0;
}

char __atomic_compare_exchange_4_workaround(int* dst, int* expected, int desired) {
    int val = 0;
    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
    if (val == *expected) {
        int sc_ret = 0;
        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
        return sc_ret == 0;
    }
    return 0;
}
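Since sc.w can fail spuriously and this workaround reports that as an ordinary failure (without updating *expected), callers are expected to retry. Below is a minimal retry-loop sketch; it is not part of this commit and assumes the extern declarations added to compiler_rt.rs (next hunk) are visible:

    // Sketch only: builds an atomic increment on top of the C workaround functions.
    unsafe fn atomic_increment(counter: *mut u32) {
        loop {
            // Snapshot the current value, then try to install current + 1.
            let mut expected = __atomic_load_4_workaround(counter);
            let desired = expected.wrapping_add(1);
            if __atomic_compare_exchange_4_workaround(counter, &mut expected, desired) {
                break; // CAS succeeded
            }
            // Value changed concurrently or sc.w failed spuriously: retry.
        }
    }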

@@ -2,6 +2,17 @@
//!
//! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic)

+#[link(name = "atomic_rt")]
+extern {
+    fn __atomic_load_1_workaround(src: *const u8) -> u8;
+    fn __atomic_load_2_workaround(src: *const u16) -> u16;
+    fn __atomic_load_4_workaround(src: *const u32) -> u32;
+    fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
+    fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
+    fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
+    fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
+}

/// Copy from:
/// https://github.com/rust-lang-nursery/compiler-builtins/blob/master/src/riscv32.rs
#[no_mangle]
@@ -28,59 +39,53 @@ use core::ptr::{read, write};
#[no_mangle]
pub unsafe extern fn __atomic_load_1(src: *const u8) -> u8 {
-    let mut res: u8 = 0;
-    asm!("amoadd.w.rl $0, zero, ($1)" : "=r"(res) : "r"(src) : "memory" : "volatile");
-    res
+    __atomic_load_1_workaround(src)
}

#[no_mangle]
pub unsafe extern fn __atomic_load_2(src: *const u16) -> u16 {
-    let mut res: u16 = 0;
-    asm!("amoadd.w.rl $0, zero, ($1)" : "=r"(res) : "r"(src) : "memory" : "volatile");
-    res
+    __atomic_load_2_workaround(src)
}

#[no_mangle]
pub unsafe extern fn __atomic_load_4(src: *const u32) -> u32 {
-    let mut res: u32 = 0;
-    asm!("amoadd.w.rl $0, zero, ($1)" : "=r"(res) : "r"(src) : "memory" : "volatile");
-    res
+    __atomic_load_4_workaround(src)
}

#[no_mangle]
pub unsafe extern fn __atomic_store_1(dst: *mut u8, val: u8) {
-    asm!("amoswap.w.aq zero, $0, ($1)" :: "r"(val), "r"(dst) : "memory" : "volatile");
+    __atomic_store_1_workaround(dst, val);
}

#[no_mangle]
pub unsafe extern fn __atomic_store_4(dst: *mut u32, val: u32) {
-    asm!("amoswap.w.aq zero, $0, ($1)" :: "r"(val), "r"(dst) : "memory" : "volatile");
+    __atomic_store_4_workaround(dst, val);
}

-unsafe fn __atomic_compare_exchange<T: PartialEq>(dst: *mut T, expected: *mut T, desired: T) -> bool {
-    // use super::interrupt;
-    // let flags = interrupt::disable_and_store();
-    // let val = read(dst);
-    // let success = val == read(expected);
-    // write(dst, if success {desired} else {val});
-    // interrupt::restore(flags);
-    // success
-    let mut val: T;
-    asm!("lr.w $0, ($1)" : "=r"(val) : "r"(dst) : "memory" : "volatile");
-    if val == *expected {
-        let mut sc_ret = 0;
-        asm!("sc.w $0, $1, ($2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory" : "volatile");
-        return sc_ret == 0
-    }
-    false
-}
+// unsafe fn __atomic_compare_exchange<T: PartialEq>(dst: *mut T, expected: *mut T, desired: T) -> bool {
+// // use super::interrupt;
+// // let flags = interrupt::disable_and_store();
+// // let val = read(dst);
+// // let success = val == read(expected);
+// // write(dst, if success {desired} else {val});
+// // interrupt::restore(flags);
+// // success
+// // let mut val: T;
+// // asm!("lr.w $0, ($1)" : "=r"(val) : "r"(dst) : "memory" : "volatile");
+// // if val == *expected {
+// // let mut sc_ret = 0;
+// // asm!("sc.w $0, $1, ($2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory" : "volatile");
+// // return sc_ret == 0
+// // }
+// false
+// }

#[no_mangle]
pub unsafe extern fn __atomic_compare_exchange_1(dst: *mut u8, expected: *mut u8, desired: u8) -> bool {
-    __atomic_compare_exchange(dst, expected, desired)
+    __atomic_compare_exchange_1_workaround(dst, expected, desired)
}

#[no_mangle]
pub unsafe extern fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool {
-    __atomic_compare_exchange(dst, expected, desired)
+    __atomic_compare_exchange_4_workaround(dst, expected, desired)
}

@@ -1,8 +1,10 @@
use core::fmt;
use log::{self, Level, LevelFilter, Log, Metadata, Record};
-use spin::Mutex;
+use sync::SpinLock as Mutex;

-static log_mutex: Mutex<()> = Mutex::new(());
+lazy_static! {
+    static ref log_mutex: Mutex<()> = Mutex::new(());
+}

pub fn init() {
    static LOGGER: SimpleLogger = SimpleLogger;

@@ -0,0 +1,40 @@
//! RISC-V atomics are not currently supported by Rust.
//! This is an ugly workaround.

use arch::compiler_rt::{__atomic_compare_exchange_4, __atomic_store_4, __atomic_load_4};
use core::cell::UnsafeCell;

pub struct AtomicLock
{
    lock: UnsafeCell<u32>
}

impl AtomicLock
{
    pub fn new() -> Self {
        AtomicLock {
            lock: UnsafeCell::new(0)
        }
    }

    /// Tries to acquire the lock; returns true on success
    pub fn try_lock(&self) -> bool {
        let mut expected: u32 = 0;
        unsafe {
            __atomic_compare_exchange_4(self.lock.get(), &mut expected as *mut u32, 1)
        }
    }

    /// Returns true if the lock is currently held
    pub fn load(&self) -> bool {
        unsafe {
            __atomic_load_4(self.lock.get()) == 1
        }
    }

    /// Releases the lock by storing 0
    pub fn store(&self) {
        unsafe {
            __atomic_store_4(self.lock.get(), 0);
        }
    }
}
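For context, this is how the new AtomicLock is intended to be driven; the loop mirrors obtain_lock in mutex.rs further below. A sketch only, not part of the commit:

    // Sketch only: spin until the lock is acquired, then release it.
    fn spin_acquire(lock: &AtomicLock) {
        while !lock.try_lock() {
            // Back off while the lock still looks held, then retry the CAS.
            while lock.load() {
                // a cpu_relax / pause hint would go here
            }
        }
    }

    fn spin_release(lock: &AtomicLock) {
        lock.store(); // writes 0, marking the lock as free
    }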

@@ -0,0 +1,30 @@
use core::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};

pub struct AtomicLock
{
    lock: AtomicBool
}

impl AtomicLock
{
    pub fn new() -> AtomicLock {
        AtomicLock {
            lock: ATOMIC_BOOL_INIT
        }
    }

    /// Tries to acquire the lock; returns true on success
    pub fn try_lock(&self) -> bool {
        self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
    }

    /// Returns true if the lock is currently held
    pub fn load(&self) -> bool {
        self.lock.load(Ordering::Relaxed)
    }

    /// Releases the lock
    pub fn store(&self) {
        self.lock.store(false, Ordering::Release);
    }
}

pub const ATOMIC_LOCK_INIT: AtomicLock = AtomicLock {
    lock: ATOMIC_BOOL_INIT
};
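Both backends expose the same new/try_lock/load/store surface, so mutex.rs below works unchanged on either architecture; the x86_64 version additionally provides ATOMIC_LOCK_INIT for static initialization. A hypothetical usage sketch, not part of the commit:

    // Sketch only: a statically initialized lock on x86_64.
    static GLOBAL_LOCK: AtomicLock = ATOMIC_LOCK_INIT;

    fn try_critical_section() {
        if GLOBAL_LOCK.try_lock() {
            // ... critical section ...
            GLOBAL_LOCK.store(); // release
        }
    }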

@@ -53,6 +53,15 @@ pub use self::condvar::*;
pub use self::mutex::*;
pub use self::semaphore::*;

+#[allow(dead_code)]
+#[cfg(target_arch = "x86_64")]
+#[path = "arch/x86_64/atomic_lock.rs"]
+pub mod atomic_lock;
+
+#[cfg(target_arch = "riscv32")]
+#[path = "arch/riscv32/atomic_lock.rs"]
+pub mod atomic_lock;
+
mod mutex;
mod condvar;
mod semaphore;

@@ -30,8 +30,8 @@ use arch::interrupt;
use core::cell::UnsafeCell;
use core::fmt;
use core::ops::{Deref, DerefMut};
-use core::sync::atomic::{ATOMIC_BOOL_INIT, AtomicBool, Ordering};
use super::Condvar;
+use super::atomic_lock::AtomicLock;

pub type SpinLock<T> = Mutex<T, Spin>;
pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
@@ -39,7 +39,7 @@ pub type ThreadLock<T> = Mutex<T, Condvar>;
pub struct Mutex<T: ?Sized, S: MutexSupport>
{
-    lock: AtomicBool,
+    lock: AtomicLock,
    support: S,
    data: UnsafeCell<T>,
}
@@ -78,7 +78,7 @@ impl<T, S: MutexSupport> Mutex<T, S>
    /// ```
    pub fn new(user_data: T) -> Mutex<T, S> {
        Mutex {
-            lock: ATOMIC_BOOL_INIT,
+            lock: AtomicLock::new(),
            data: UnsafeCell::new(user_data),
            support: S::new(),
        }
@@ -96,9 +96,9 @@ impl<T, S: MutexSupport> Mutex<T, S>
impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
{
    fn obtain_lock(&self) {
-        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
+        while !self.lock.try_lock() {
            // Wait until the lock looks unlocked before retrying
-            while self.lock.load(Ordering::Relaxed) {
+            while self.lock.load() {
                self.support.cpu_relax();
            }
        }
@@ -137,14 +137,14 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
    ///
    /// If the lock isn't held, this is a no-op.
    pub unsafe fn force_unlock(&self) {
-        self.lock.store(false, Ordering::Release);
+        self.lock.store();
    }

    /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
    /// a guard within Some.
    pub fn try_lock(&self) -> Option<MutexGuard<T, S>> {
        let support_guard = S::before_lock();
-        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
+        if self.lock.try_lock() {
            Some(MutexGuard {
                mutex: self,
                support_guard,
@@ -186,7 +186,7 @@ impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S>
{
    /// The dropping of the MutexGuard will release the lock it was created from.
    fn drop(&mut self) {
-        self.mutex.lock.store(false, Ordering::Release);
+        self.mutex.lock.store();
        self.mutex.support.after_unlock();
    }
}
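The public Mutex interface is unchanged by this commit; only the lock field moved from AtomicBool to the per-arch AtomicLock. A short usage sketch, assuming the usual lock()/try_lock() methods on this Mutex (only try_lock and obtain_lock appear in the hunks above):

    // Sketch only: exercises SpinLock<T> = Mutex<T, Spin> on top of AtomicLock.
    fn example() {
        let m = SpinLock::new(0u32);        // lock starts as AtomicLock::new()
        {
            let mut guard = m.lock();       // obtain_lock spins via try_lock()/load()
            *guard += 1;
        }                                   // dropping the guard calls lock.store()
        assert!(m.try_lock().is_some());    // the lock is free again
    }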
