yet another riscv atomic fix

master
WangRunji 6 years ago
parent 5d7e97d9e9
commit 190711fbc2

@@ -12,11 +12,11 @@ set -e
 if [[ ${RV32} = 1 ]]; then
     TARGET_ARCH=riscv32
-    COMPILER_RT_CFLAGS="-march=rv32ia -mabi=ilp32 -O3"
+    COMPILER_RT_CFLAGS="-march=rv32imac -mabi=ilp32 -O3"
     SFSIMG_CFLAGS="-march=rv32ia -mabi=ilp32"
 else
     TARGET_ARCH=riscv64
-    COMPILER_RT_CFLAGS="-march=rv64ia -mabi=lp64 -O3"
+    COMPILER_RT_CFLAGS="-march=rv64imac -mabi=lp64 -O3"
     SFSIMG_CFLAGS="-march=rv64ia -mabi=lp64"
 fi
 UCORE_USER_IMAGE="../user/img/ucore-${TARGET_ARCH}.img"
@@ -60,7 +60,7 @@ fi
 fi
 # if some crates do not exist, build for riscv32 first
-if ! [[ -f $CARGO_PATH/git/checkouts/bit-vec-437fa4a002bd318d/9861a58*/src/lib.rs ]]
+if ! [[ -f $CARGO_PATH/git/checkouts/bit-vec-437fa4a002bd318d/9861a58/src/lib.rs ]]
 then
     make kernel arch=riscv32 board=none
 fi

@@ -10,24 +10,21 @@ fn main() {
     let arch: String = std::env::var("ARCH").unwrap();
     match arch.as_str() {
         "x86_64" => {
-            // cc::Build::new()
-            //     .file("src/arch/x86_64/driver/apic/lapic.c")
-            //     .file("src/arch/x86_64/driver/keyboard/keyboard.c")
-            //     .flag("-mcmodel=large")
-            //     .compile("cobj");
             gen_vector_asm().unwrap();
         }
         "riscv32" => {
             println!("cargo:rerun-if-changed=src/arch/riscv32/compiler_rt.c");
             cc::Build::new()
                 .file("src/arch/riscv32/compiler_rt.c")
-                .flag("-march=rv32ia")
+                .flag("-march=rv32imac")
                 .flag("-mabi=ilp32")
                 .flag("-Wno-builtin-declaration-mismatch")
                 .flag("-O3")
                 .compile("atomic_rt");
             if let Ok(file_path) = gen_sfsimg_asm() {
                 cc::Build::new()
                     .file(&file_path)
-                    .flag("-march=rv32ia")
+                    .flag("-march=rv32imac")
                     .flag("-mabi=ilp32")
                     .compile("sfsimg");
             }

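For context: when the Rust side of the build targets a RISC-V configuration without native atomic support, LLVM lowers core::sync::atomic operations to __atomic_* libcalls, and the atomic_rt object compiled above is what satisfies those symbols at link time (see the LLVM atomics libcall page linked at the top of compiler_rt.c). A minimal sketch of how those symbols would look from the Rust side, mirroring the three-argument compare-exchange signature this file uses rather than the five-argument form in the LLVM spec; these declarations are illustrative only and appear nowhere in the commit, since rustc emits the calls automatically:

    // Illustrative only: the libcall symbols defined by compiler_rt.c,
    // as Rust code would see them. Nothing in the kernel declares these
    // by hand; rustc/LLVM generates the calls.
    extern "C" {
        fn __atomic_load_4(ptr: *const u32) -> u32;     // relaxed load
        fn __atomic_store_4(ptr: *mut u32, val: u32);   // release store
        fn __atomic_compare_exchange_4(
            ptr: *mut u32,
            expected: *mut u32, // updated with the observed value on failure
            desired: u32,
        ) -> u8;                // nonzero on success, 0 on failure
        fn __atomic_fetch_add_4(ptr: *mut u32, val: u32) -> u32;
    }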
@@ -10,7 +10,7 @@
      v: UnsafeCell<u8>,
  }
-@@ -265,6 +268,44 @@
+@@ -265,6 +268,59 @@
  pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
  #[cfg(target_has_atomic = "8")]
@@ -40,12 +40,27 @@
 +    ///
 +    #[inline]
 +    #[stable(feature = "rust1", since = "1.0.0")]
 +    #[cfg(target_has_atomic = "cas")]
 +    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
-+        loop {
-+            if let Ok(val) = unsafe { atomic_compare_exchange(self.v.get(), current as u32, new as u32, order, order) } {
-+                return val != 0;
-+            }
-+        }
++        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
++            Ok(x) => x,
++            Err(x) => x,
++        }
 +    }
++
++    ///
++    #[inline]
++    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
++    pub fn compare_exchange(&self,
++                            current: bool,
++                            new: bool,
++                            success: Ordering,
++                            failure: Ordering)
++                            -> Result<bool, bool> {
++        match unsafe {
++            atomic_compare_exchange(self.v.get(), current as u32, new as u32, success, failure)
++        } {
++            Ok(x) => Ok(x != 0),
++            Err(x) => Err(x != 0),
++        }
++    }
 +}

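The patched compare_and_swap now has the same shape as upstream libcore: delegate to compare_exchange, using the strongest failure ordering that is legal for the given success ordering. For reference, a sketch of that mapping as it exists in libcore (reproduced here for illustration; the failure path performs no store, so any Release component is dropped):

    use core::sync::atomic::Ordering;
    use core::sync::atomic::Ordering::*;

    // Sketch of libcore's strongest_failure_ordering helper, which the
    // new compare_and_swap body relies on.
    fn strongest_failure_ordering(order: Ordering) -> Ordering {
        match order {
            Release => Relaxed, // no store happens on failure
            AcqRel => Acquire,
            other => other,     // Relaxed, Acquire, SeqCst unchanged
        }
    }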
@@ -1,60 +1,62 @@
 // http://llvm.org/docs/Atomics.html#libcalls-atomic
+inline void mb() {
+    __asm__ __volatile__("fence" ::: "memory");
+}
 typedef unsigned u32;
 // K210 doesn't support atomic operations on 0x40000000 (io port)
 // We have to detect it and move it to 0x80000000
-inline u32* fix_ptr32(u32 *src) {
-    return (u32)src < 0x80000000?
-        (u32*)((u32)src + 0x40000000):
-        src;
+inline u32* fix_ptr32(u32 *ptr) {
+    return (u32)ptr < 0x80000000?
+        (u32*)((u32)ptr + 0x40000000):
+        ptr;
 }
-u32 __atomic_load_1(u32 *src) {
-    src = fix_ptr32(src);
-    u32 res = 0;
-    __asm__ __volatile__("amoadd.w %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
+u32 __atomic_load_1(u32 *ptr) {
+    ptr = fix_ptr32(ptr);
+    return *ptr;
 }
-u32 __atomic_load_2(u32 *src) {
-    src = fix_ptr32(src);
-    u32 res = 0;
-    __asm__ __volatile__("amoadd.w %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
+u32 __atomic_load_2(u32 *ptr) {
+    ptr = fix_ptr32(ptr);
+    return *ptr;
 }
-u32 __atomic_load_4(u32 *src) {
-    src = fix_ptr32(src);
-    u32 res = 0;
-    __asm__ __volatile__("amoadd.w %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
+// relaxed
+u32 __atomic_load_4(u32 *ptr) {
+    ptr = fix_ptr32(ptr);
+    return *ptr;
 }
-void __atomic_store_4(u32 *dst, u32 val) {
-    dst = fix_ptr32(dst);
-    __asm__ __volatile__("amoswap.w zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
+// release
+void __atomic_store_4(u32 *ptr, u32 val) {
+    ptr = fix_ptr32(ptr);
+    mb();
+    __asm__ __volatile__("amoswap.w zero, %0, (%1)" :: "r"(val), "r"(ptr) : "memory");
 }
-char __atomic_compare_exchange_4(u32* dst, u32* expected, u32 desired) {
-    dst = fix_ptr32(dst);
-    u32 val, expect, result;
-    // val = *dst
-    __asm__ __volatile__("lw %0, (%1)" : "=r"(expect) : "r"(expected) : "memory");
-    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
-    // if (val != *expected) goto fail;
-    if (val != expect) goto __atomic_compare_exchange_4_fail;
-    // Try: *dst = desired. If success, result == 0, otherwise result != 0.
-    __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(dst) : "memory");
-    return result == 0;
-__atomic_compare_exchange_4_fail:
-    // *expected should always equal the previous value of *dst
-    *expected = val;
-    return 0;
+// strong, acquire
+char __atomic_compare_exchange_4(u32* ptr, u32* expected, u32 desired) {
+    ptr = fix_ptr32(ptr);
+    u32 val, expect = *expected, result, ret;
+    while (1) {
+        __asm__ __volatile__("lr.w.aq %0, (%1)" : "=r"(val) : "r"(ptr) : "memory");
+        ret = val == expect;
+        if (!ret) {
+            // *expected should always equal the previous value of *ptr
+            *expected = val;
+            return ret;
+        }
+        // Try: *ptr = desired. If success, result == 0, otherwise result != 0.
+        __asm__ __volatile__("sc.w.aq %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(ptr) : "memory");
+        if (result == 0) {
+            return ret;
+        }
+    }
 }
 u32 __atomic_fetch_add_4(u32* ptr, u32 val) {
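The rewritten __atomic_compare_exchange_4 is what makes this a strong CAS: a spurious sc.w failure now retries the lr.w/sc.w pair instead of being reported to the caller as a compare failure. A rough Rust inline-asm rendering of the same loop, assuming a riscv32 target with the A extension available at runtime; the function name is hypothetical and nothing like it appears in this commit:

    // Rough Rust equivalent of the C loop above. lr.w.aq / sc.w.aq carry
    // acquire ordering, mirroring the commit; sc.w.aq writes 0 on success.
    #[cfg(target_arch = "riscv32")]
    unsafe fn compare_exchange_u32(ptr: *mut u32, expected: &mut u32, desired: u32) -> bool {
        loop {
            let val: u32;
            core::arch::asm!("lr.w.aq {v}, ({p})", v = out(reg) val, p = in(reg) ptr);
            if val != *expected {
                *expected = val; // report the value actually observed
                return false;
            }
            let fail: u32;
            core::arch::asm!("sc.w.aq {f}, {d}, ({p})",
                             f = out(reg) fail, d = in(reg) desired, p = in(reg) ptr);
            if fail == 0 {
                return true; // store-conditional committed; CAS succeeded
            }
            // sc.w failed spuriously: retry instead of reporting failure
        }
    }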
@@ -76,43 +78,45 @@ typedef unsigned long long u64;
 // K210 doesn't support atomic operations on 0x40000000 (io port)
 // We have to detect it and move it to 0x80000000
-inline u64* fix_ptr64(u64 *src) {
-    return (u64)src < 0x80000000?
-        (u64*)((u64)src + 0x40000000):
-        src;
+inline u64* fix_ptr64(u64 *ptr) {
+    return (u64)ptr < 0x80000000?
+        (u64*)((u64)ptr + 0x40000000):
+        ptr;
 }
-u64 __atomic_load_8(u64 *src) {
-    src = fix_ptr64(src);
-    u64 res = 0;
-    __asm__ __volatile__("amoadd.d %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
+// relaxed
+u64 __atomic_load_8(u64 *ptr) {
+    ptr = fix_ptr64(ptr);
+    return *ptr;
 }
-void __atomic_store_8(u64 *dst, u64 val) {
-    dst = fix_ptr64(dst);
-    __asm__ __volatile__("amoswap.d zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
+// release
+void __atomic_store_8(u64 *ptr, u64 val) {
+    ptr = fix_ptr64(ptr);
+    mb();
+    __asm__ __volatile__("amoswap.d zero, %0, (%1)" :: "r"(val), "r"(ptr) : "memory");
 }
-char __atomic_compare_exchange_8(u64* dst, u64* expected, u64 desired) {
-    dst = fix_ptr64(dst);
-    u64 val, expect, result;
-    // val = *dst
-    __asm__ __volatile__("ld %0, (%1)" : "=r"(expect) : "r"(expected) : "memory");
-    __asm__ __volatile__("lr.d %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
-    // if (val != *expected) goto fail;
-    if (val != expect) goto __atomic_compare_exchange_8_fail;
-    // Try: *dst = desired. If success, result == 0, otherwise result != 0.
-    __asm__ __volatile__("sc.d %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(dst) : "memory");
-    return result == 0;
-__atomic_compare_exchange_8_fail:
-    // *expected should always equal the previous value of *dst
-    *expected = val;
-    return 0;
+// strong, acquire
+char __atomic_compare_exchange_8(u64* ptr, u64* expected, u64 desired) {
+    ptr = fix_ptr64(ptr);
+    u64 val, expect = *expected, result, ret;
+    while (1) {
+        __asm__ __volatile__("lr.d.aq %0, (%1)" : "=r"(val) : "r"(ptr) : "memory");
+        ret = val == expect;
+        if (!ret) {
+            // *expected should always equal the previous value of *ptr
+            *expected = val;
+            return ret;
+        }
+        // Try: *ptr = desired. If success, result == 0, otherwise result != 0.
+        __asm__ __volatile__("sc.d.aq %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(ptr) : "memory");
+        if (result == 0) {
+            return ret;
+        }
+    }
 }
 u64 __atomic_fetch_add_8(u64* ptr, u64 val) {

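Taken together, the patched AtomicBool and these libcalls let ordinary Rust atomics work on this target. A hypothetical caller, for illustration only: a minimal spinlock whose compare_exchange ultimately reaches the __atomic_compare_exchange_4 routine above.

    // Hypothetical usage sketch, not part of this commit. With the patch
    // above, AtomicBool::compare_exchange goes through
    // atomic_compare_exchange, which on this target lowers to the
    // __atomic_compare_exchange_4 libcall implemented in compiler_rt.c.
    use core::sync::atomic::{AtomicBool, Ordering};

    pub struct SpinLock {
        locked: AtomicBool,
    }

    impl SpinLock {
        pub const fn new() -> Self {
            SpinLock { locked: AtomicBool::new(false) }
        }

        pub fn lock(&self) {
            // Spin until we swap false -> true; Acquire on success.
            while self
                .locked
                .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_err()
            {}
        }

        pub fn unlock(&self) {
            self.locked.store(false, Ordering::Release);
        }
    }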