Fix RV32 atomic.

- Fix __atomic_compare_exchange_4().
- Add patch for core::sync::atomic.
- Revert kernel Mutex.
master
WangRunji 6 years ago
parent 925a08f9ae
commit 81196729e4

@@ -137,6 +137,9 @@ kernel:
 ifeq ($(arch), x86_64)
 	@bootimage build $(build_args)
 else
+	@-patch -p0 -N -b \
+		$(shell rustc --print sysroot)/lib/rustlib/src/rust/src/libcore/sync/atomic.rs \
+		src/arch/riscv32/atomic.patch
 	@CC=$(cc) cargo xbuild $(build_args)
 endif

@@ -0,0 +1,57 @@
--- atomic_backup.rs	2018-10-06 19:59:14.000000000 +0800
+++ atomic.rs	2018-10-26 14:34:31.000000000 +0800
@@ -125,6 +125,9 @@
 #[cfg(target_has_atomic = "8")]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct AtomicBool {
+    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+    v: UnsafeCell<u32>,
+    #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
     v: UnsafeCell<u8>,
 }
@@ -265,6 +268,44 @@
 pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
 #[cfg(target_has_atomic = "8")]
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
+impl AtomicBool {
+    ///
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub const fn new(v: bool) -> AtomicBool {
+        AtomicBool { v: UnsafeCell::new(v as u32) }
+    }
+
+    ///
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn load(&self, order: Ordering) -> bool {
+        unsafe { atomic_load(self.v.get(), order) != 0 }
+    }
+
+    ///
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    pub fn store(&self, val: bool, order: Ordering) {
+        unsafe { atomic_store(self.v.get(), val as u32, order); }
+    }
+
+    ///
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[cfg(target_has_atomic = "cas")]
+    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
+        loop {
+            if let Ok(val) = unsafe { atomic_compare_exchange(self.v.get(), current as u32, new as u32, order, order) } {
+                return val != 0;
+            }
+        }
+    }
+}
+
+#[cfg(target_has_atomic = "8")]
+#[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
 impl AtomicBool {
     /// Creates a new `AtomicBool`.
     ///
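
The patch above widens `AtomicBool`'s storage to a `u32` on riscv32/riscv64, because RV32A only provides word-sized AMO and LR/SC instructions, so boolean atomics have to be word-sized to map onto them. A minimal sketch of the visible effect, assuming the patch has been applied to the local sysroot copy of libcore (the `check_layout` function and its size assertions are illustrative, not part of the commit):

```rust
// Sketch only: shows the layout change introduced by atomic.patch.
use core::mem::size_of;
use core::sync::atomic::AtomicBool;

fn check_layout() {
    // With the patch applied, AtomicBool is backed by UnsafeCell<u32> on RISC-V ...
    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
    assert_eq!(size_of::<AtomicBool>(), 4);
    // ... and keeps the upstream single-byte layout everywhere else.
    #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
    assert_eq!(size_of::<AtomicBool>(), 1);
}

fn main() {
    check_layout();
}
```

Since this changes `AtomicBool`'s layout relative to upstream, the Makefile applies the patch only to the sysroot sources that `cargo xbuild` recompiles for the RISC-V target.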

@@ -1,50 +1,27 @@
 // http://llvm.org/docs/Atomics.html#libcalls-atomic
-char __atomic_load_1(char *src) {
-    char res = 0;
-    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
-}
-short __atomic_load_2(short *src) {
-    short res = 0;
-    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
-}
 int __atomic_load_4(int *src) {
     int res = 0;
     __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
     return res;
 }
-char __atomic_store_1(char *dst, char val) {
-    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
-}
 int __atomic_store_4(int *dst, int val) {
     __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
 }
-char __atomic_compare_exchange_1(char* dst, char* expected, char desired) {
-    char val = 0;
-    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
-    if (val == *expected) {
-        int sc_ret = 0;
-        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
-        return sc_ret == 0;
-    }
-    return 0;
-}
 char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
-    int val = 0;
+    int val;
+    // val = *dst
     __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
     if (val == *expected) {
-        int sc_ret = 0;
-        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
-        return sc_ret == 0;
+        int result;
+        // Try: *dst = desired. If success, result = 0, otherwise result != 0.
+        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(dst) : "memory");
+        return result == 0;
     }
+    // *expected should always equal to the previous value of *dst
+    *expected = val;
     return 0;
 }
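
The functional fix in `__atomic_compare_exchange_4` is the `*expected = val;` on the mismatch path: per the libcall contract referenced at the top of the file, a failed compare-exchange must report the observed value back through `expected`. The byte- and halfword-sized helpers are dropped because, with the libcore patch above, `AtomicBool` is word-sized and only the 4-byte entry points are still needed. Below is a caller-side sketch of the semantics this gives; the extern declaration matches the one in the `atomic_lock.rs` file deleted further down, while the `cas_loop` helper itself is purely illustrative:

```rust
// Sketch: caller-side view of the fixed libcall. Only the extern
// signature comes from the repository; cas_loop is an illustration
// of the retry pattern a strong CAS needs on top of lr.w/sc.w.
extern "C" {
    fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
}

/// Atomically replace `*dst` with `desired` if it currently equals `current`.
/// Returns the value that was actually observed in `*dst`.
unsafe fn cas_loop(dst: *mut u32, current: u32, desired: u32) -> u32 {
    let mut expected = current;
    loop {
        if __atomic_compare_exchange_4(dst, &mut expected, desired) {
            return current; // swap happened, the old value was `current`
        }
        if expected != current {
            // Real mismatch: thanks to the fix, `expected` now carries
            // the value that was actually in `*dst`.
            return expected;
        }
        // `expected` unchanged: the sc.w lost its reservation, so the
        // failure was spurious. Retry.
    }
}
```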

@@ -1,7 +1,7 @@
 use consts::KERNEL_OFFSET;
 use core::ptr::Unique;
 use core::fmt;
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 use volatile::Volatile;
 use x86_64::instructions::port::Port;
 use logging::Color;

@@ -2,7 +2,7 @@ use simple_filesystem::*;
 use alloc::boxed::Box;
 #[cfg(target_arch = "x86_64")]
 use arch::driver::ide;
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 // Hard link user program
 #[cfg(target_arch = "riscv32")]

@@ -1,6 +1,6 @@
 use core::fmt;
 use log::{self, Level, LevelFilter, Log, Metadata, Record};
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 lazy_static! {
     static ref log_mutex: Mutex<()> = Mutex::new(());

@@ -1,8 +1,7 @@
 pub use arch::paging::*;
 use bit_allocator::{BitAlloc, BitAlloc4K, BitAlloc64K};
 use consts::MEMORY_OFFSET;
-use sync::{MutexGuard, Spin};
-use sync::SpinLock as Mutex;
+use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 use ucore_memory::{*, paging::PageTable};
 use ucore_memory::cow::CowExt;
@@ -49,7 +48,7 @@ lazy_static! {
 }
 /// The only way to get active page table
-pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>, Spin> {
+pub fn active_table() -> MutexGuard<'static, CowExt<ActivePageTable>> {
     ACTIVE_TABLE.lock()
 }

@@ -1,44 +0,0 @@
//! RISCV atomic is not currently supported by Rust.
//! This is a ugly workaround.

use core::cell::UnsafeCell;

extern {
    fn __atomic_load_4(src: *const u32) -> u32;
    fn __atomic_store_4(dst: *mut u32, val: u32);
    fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
}

pub struct AtomicLock
{
    lock: UnsafeCell<u32>
}

impl AtomicLock
{
    pub fn new() -> Self {
        AtomicLock {
            lock: UnsafeCell::new(0)
        }
    }

    /// Returns 1 if lock is acquired
    pub fn try_lock(&self) -> bool {
        let mut expected: u32 = 0;
        unsafe {
            __atomic_compare_exchange_4(self.lock.get(), &mut expected as *mut u32, 1)
        }
    }

    pub fn load(&self) -> bool {
        unsafe {
            __atomic_load_4(self.lock.get()) == 1
        }
    }

    pub fn store(&self) {
        unsafe {
            __atomic_store_4(self.lock.get(), 0);
        }
    }
}

@ -1,31 +0,0 @@
use core::sync::atomic::{AtomicBool, Ordering};
pub struct AtomicLock
{
lock: AtomicBool
}
impl AtomicLock
{
pub fn new() -> AtomicLock {
AtomicLock {
lock: AtomicBool::new(false)
}
}
pub fn try_lock(&self) -> bool {
self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
}
pub fn load(&self) -> bool {
self.lock.load(Ordering::Relaxed)
}
pub fn store(&self) {
self.lock.store(false, Ordering::Release);
}
}
pub const ATOMIC_LOCK_INIT: AtomicLock = AtomicLock {
lock: AtomicBool::new(false)
};

@@ -53,15 +53,6 @@ pub use self::condvar::*;
 pub use self::mutex::*;
 pub use self::semaphore::*;
-#[allow(dead_code)]
-#[cfg(target_arch = "x86_64")]
-#[path = "arch/x86_64/atomic_lock.rs"]
-pub mod atomic_lock;
-#[cfg(target_arch = "riscv32")]
-#[path = "arch/riscv32/atomic_lock.rs"]
-pub mod atomic_lock;
 mod mutex;
 mod condvar;
 mod semaphore;

@@ -30,8 +30,8 @@ use arch::interrupt;
 use core::cell::UnsafeCell;
 use core::fmt;
 use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{ATOMIC_BOOL_INIT, AtomicBool, Ordering};
 use super::Condvar;
-use super::atomic_lock::AtomicLock;
 pub type SpinLock<T> = Mutex<T, Spin>;
 pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
@@ -39,7 +39,7 @@ pub type ThreadLock<T> = Mutex<T, Condvar>;
 pub struct Mutex<T: ?Sized, S: MutexSupport>
 {
-    lock: AtomicLock,
+    lock: AtomicBool,
     support: S,
     data: UnsafeCell<T>,
 }
@@ -78,7 +78,7 @@ impl<T, S: MutexSupport> Mutex<T, S>
     /// ```
     pub fn new(user_data: T) -> Mutex<T, S> {
         Mutex {
-            lock: AtomicLock::new(),
+            lock: ATOMIC_BOOL_INIT,
             data: UnsafeCell::new(user_data),
             support: S::new(),
         }
@@ -96,9 +96,9 @@ impl<T, S: MutexSupport> Mutex<T, S>
 impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
 {
     fn obtain_lock(&self) {
-        while !self.lock.try_lock() {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
             // Wait until the lock looks unlocked before retrying
-            while self.lock.load() {
+            while self.lock.load(Ordering::Relaxed) {
                 self.support.cpu_relax();
             }
         }
@@ -137,14 +137,14 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
     ///
     /// If the lock isn't held, this is a no-op.
     pub unsafe fn force_unlock(&self) {
-        self.lock.store();
+        self.lock.store(false, Ordering::Release);
     }
     /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
     /// a guard within Some.
     pub fn try_lock(&self) -> Option<MutexGuard<T, S>> {
         let support_guard = S::before_lock();
-        if self.lock.try_lock() {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
             Some(MutexGuard {
                 mutex: self,
                 support_guard,
@@ -174,19 +174,19 @@ impl<T: ?Sized + Default, S: MutexSupport> Default for Mutex<T, S> {
 impl<'a, T: ?Sized, S: MutexSupport> Deref for MutexGuard<'a, T, S>
 {
     type Target = T;
-    fn deref<'b>(&'b self) -> &'b T { unsafe { &*self.mutex.data.get() } }
+    fn deref(&self) -> &T { unsafe { &*self.mutex.data.get() } }
 }
 impl<'a, T: ?Sized, S: MutexSupport> DerefMut for MutexGuard<'a, T, S>
 {
-    fn deref_mut<'b>(&'b mut self) -> &'b mut T { unsafe { &mut *self.mutex.data.get() } }
+    fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.data.get() } }
 }
 impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S>
 {
     /// The dropping of the MutexGuard will release the lock it was created from.
     fn drop(&mut self) {
-        self.mutex.lock.store();
+        self.mutex.lock.store(false, Ordering::Release);
         self.mutex.support.after_unlock();
     }
 }
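
With the revert, the kernel `Mutex` is back to the standard acquire/release spin protocol on `AtomicBool`: a CAS to acquire, relaxed loads while spinning, and a release store to unlock. The same protocol can be written with `compare_exchange_weak`, which was already stable at the time, instead of `compare_and_swap`; the sketch below is not code from the repository, and its empty inner loop stands in for the kernel's `support.cpu_relax()`:

```rust
// Standalone sketch of the spin protocol used by the reverted Mutex.
use core::sync::atomic::{AtomicBool, Ordering};

pub struct RawSpinLock {
    locked: AtomicBool,
}

impl RawSpinLock {
    pub const fn new() -> Self {
        RawSpinLock { locked: AtomicBool::new(false) }
    }

    pub fn lock(&self) {
        // A weak CAS may fail spuriously (e.g. a lost lr/sc reservation),
        // which only costs one more trip around the outer loop.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Spin read-only while the lock looks held; the kernel calls
            // cpu_relax() in this inner loop.
            while self.locked.load(Ordering::Relaxed) {}
        }
    }

    pub fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }
}
```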
