diff --git a/kernel/Makefile b/kernel/Makefile
index 0b49f67..e344ad1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,6 +137,9 @@ kernel:
 ifeq ($(arch), x86_64)
	@bootimage build $(build_args)
 else
+	@-patch -p0 -N -b \
+		$(shell rustc --print sysroot)/lib/rustlib/src/rust/src/libcore/sync/atomic.rs \
+		src/arch/riscv32/atomic.patch
	@CC=$(cc) cargo xbuild $(build_args)
 endif
diff --git a/kernel/src/arch/riscv32/atomic.patch b/kernel/src/arch/riscv32/atomic.patch
new file mode 100644
index 0000000..ec7ca3a
--- /dev/null
+++ b/kernel/src/arch/riscv32/atomic.patch
@@ -0,0 +1,57 @@
+--- atomic_backup.rs	2018-10-06 19:59:14.000000000 +0800
++++ atomic.rs	2018-10-26 14:34:31.000000000 +0800
+@@ -125,6 +125,9 @@
+ #[cfg(target_has_atomic = "8")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub struct AtomicBool {
++    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
++    v: UnsafeCell<u32>,
++    #[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
+     v: UnsafeCell<u8>,
+ }
+ 
+@@ -265,6 +268,44 @@
+ pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);
+ 
+ #[cfg(target_has_atomic = "8")]
++#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
++impl AtomicBool {
++    ///
++    #[inline]
++    #[stable(feature = "rust1", since = "1.0.0")]
++    pub const fn new(v: bool) -> AtomicBool {
++        AtomicBool { v: UnsafeCell::new(v as u32) }
++    }
++
++    ///
++    #[inline]
++    #[stable(feature = "rust1", since = "1.0.0")]
++    pub fn load(&self, order: Ordering) -> bool {
++        unsafe { atomic_load(self.v.get(), order) != 0 }
++    }
++
++    ///
++    #[inline]
++    #[stable(feature = "rust1", since = "1.0.0")]
++    pub fn store(&self, val: bool, order: Ordering) {
++        unsafe { atomic_store(self.v.get(), val as u32, order); }
++    }
++
++    ///
++    #[inline]
++    #[stable(feature = "rust1", since = "1.0.0")]
++    #[cfg(target_has_atomic = "cas")]
++    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
++        loop {
++            if let Ok(val) = unsafe { atomic_compare_exchange(self.v.get(), current as u32, new as u32, order, order) } {
++                return val != 0;
++            }
++        }
++    }
++}
++
++#[cfg(target_has_atomic = "8")]
++#[cfg(not(any(target_arch = "riscv32", target_arch = "riscv64")))]
+ impl AtomicBool {
+     /// Creates a new `AtomicBool`.
+     ///
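Note (not part of the patch): the patch stores the flag in a 32-bit word because the RISC-V A extension only provides word-sized AMO and LR/SC instructions, so the stock 8-bit `AtomicBool` cannot be lowered natively on riscv32. Below is a minimal user-space sketch of the same idea, using `AtomicU32` as a stand-in for the target's native 32-bit atomics; `BoolOnU32` is a hypothetical name used only for illustration, not code from libcore or this repository.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// A bool flag stored in a 32-bit word, mirroring the layout the patch
/// gives `AtomicBool` on riscv32/riscv64.
pub struct BoolOnU32 {
    v: AtomicU32,
}

impl BoolOnU32 {
    pub const fn new(v: bool) -> Self {
        BoolOnU32 { v: AtomicU32::new(v as u32) }
    }

    pub fn load(&self, order: Ordering) -> bool {
        self.v.load(order) != 0
    }

    pub fn store(&self, val: bool, order: Ordering) {
        self.v.store(val as u32, order);
    }

    /// The standard `compare_and_swap` contract: returns the previous value,
    /// swapping only when it equals `current`.
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.v.compare_exchange(current as u32, new as u32, order, Ordering::Relaxed) {
            Ok(prev) | Err(prev) => prev != 0,
        }
    }
}

fn main() {
    let flag = BoolOnU32::new(false);
    assert_eq!(flag.compare_and_swap(false, true, Ordering::Acquire), false);
    assert!(flag.load(Ordering::Relaxed));
}
```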
diff --git a/kernel/src/arch/riscv32/compiler_rt.c b/kernel/src/arch/riscv32/compiler_rt.c
index 1d41cf6..95c871f 100644
--- a/kernel/src/arch/riscv32/compiler_rt.c
+++ b/kernel/src/arch/riscv32/compiler_rt.c
@@ -1,50 +1,27 @@
 // http://llvm.org/docs/Atomics.html#libcalls-atomic
 
-char __atomic_load_1(char *src) {
-    char res = 0;
-    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
-}
-
-short __atomic_load_2(short *src) {
-    short res = 0;
-    __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
-    return res;
-}
-
 int __atomic_load_4(int *src) {
     int res = 0;
     __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
     return res;
 }
 
-char __atomic_store_1(char *dst, char val) {
-    __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
-}
-
 int __atomic_store_4(int *dst, int val) {
     __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
 }
 
-char __atomic_compare_exchange_1(char* dst, char* expected, char desired) {
-    char val = 0;
-    __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
-    if (val == *expected) {
-        int sc_ret = 0;
-        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
-        return sc_ret == 0;
-    }
-    return 0;
-}
-
 char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
-    int val = 0;
+    int val;
+    // val = *dst
     __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
     if (val == *expected) {
-        int sc_ret = 0;
-        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory");
-        return sc_ret == 0;
+        int result;
+        // Try: *dst = desired. If success, result = 0, otherwise result != 0.
+        __asm__ __volatile__("sc.w %0, %1, (%2)" : "=r"(result) : "r"(desired), "r"(dst) : "memory");
+        return result == 0;
     }
+    // *expected should always equal to the previous value of *dst
+    *expected = val;
     return 0;
 }
diff --git a/kernel/src/arch/x86_64/driver/vga.rs b/kernel/src/arch/x86_64/driver/vga.rs
index a5b2feb..7ef8720 100644
--- a/kernel/src/arch/x86_64/driver/vga.rs
+++ b/kernel/src/arch/x86_64/driver/vga.rs
@@ -1,7 +1,7 @@
 use consts::KERNEL_OFFSET;
 use core::ptr::Unique;
 use core::fmt;
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 use volatile::Volatile;
 use x86_64::instructions::port::Port;
 use logging::Color;
diff --git a/kernel/src/fs.rs b/kernel/src/fs.rs
index 27a2b74..f1db383 100644
--- a/kernel/src/fs.rs
+++ b/kernel/src/fs.rs
@@ -2,7 +2,7 @@ use simple_filesystem::*;
 use alloc::boxed::Box;
 #[cfg(target_arch = "x86_64")]
 use arch::driver::ide;
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 
 // Hard link user program
 #[cfg(target_arch = "riscv32")]
diff --git a/kernel/src/logging.rs b/kernel/src/logging.rs
index c1f2241..b860de6 100644
--- a/kernel/src/logging.rs
+++ b/kernel/src/logging.rs
@@ -1,6 +1,6 @@
 use core::fmt;
 use log::{self, Level, LevelFilter, Log, Metadata, Record};
-use sync::SpinLock as Mutex;
+use spin::Mutex;
 
 lazy_static! {
     static ref log_mutex: Mutex<()> = Mutex::new(());
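Note (not part of the patch): the added `*expected = val;` in `__atomic_compare_exchange_4` gives the function the usual compare-exchange contract: when the comparison fails, the caller gets back the value actually observed in memory and can retry from it. A Rust sketch of that caller-side pattern is below; `fetch_add_via_cas` is a made-up helper used only to illustrate why the write-back matters, not code from this repository.

```rust
use std::sync::atomic::{AtomicI32, Ordering};

/// Emulate fetch_add with a compare-exchange retry loop. The loop makes
/// progress because a failed compare_exchange reports the value it observed
/// (exactly what `*expected = val;` provides at the C level).
fn fetch_add_via_cas(counter: &AtomicI32, delta: i32) -> i32 {
    let mut expected = counter.load(Ordering::Relaxed);
    loop {
        match counter.compare_exchange(
            expected,
            expected + delta,
            Ordering::AcqRel,
            Ordering::Relaxed,
        ) {
            Ok(prev) => return prev,
            // On failure, continue from the observed value instead of a stale guess.
            Err(observed) => expected = observed,
        }
    }
}

fn main() {
    let c = AtomicI32::new(40);
    assert_eq!(fetch_add_via_cas(&c, 2), 40);
    assert_eq!(c.load(Ordering::Relaxed), 42);
}
```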
diff --git a/kernel/src/memory.rs b/kernel/src/memory.rs
index dec17fb..0f203f3 100644
--- a/kernel/src/memory.rs
+++ b/kernel/src/memory.rs
@@ -1,8 +1,7 @@
 pub use arch::paging::*;
 use bit_allocator::{BitAlloc, BitAlloc4K, BitAlloc64K};
 use consts::MEMORY_OFFSET;
-use sync::{MutexGuard, Spin};
-use sync::SpinLock as Mutex;
+use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 use ucore_memory::{*, paging::PageTable};
 use ucore_memory::cow::CowExt;
@@ -49,7 +48,7 @@ lazy_static! {
 }
 
 /// The only way to get active page table
-pub fn active_table() -> MutexGuard<'static, CowExt, Spin> {
+pub fn active_table() -> MutexGuard<'static, CowExt> {
     ACTIVE_TABLE.lock()
 }
 
diff --git a/kernel/src/sync/arch/riscv32/atomic_lock.rs b/kernel/src/sync/arch/riscv32/atomic_lock.rs
deleted file mode 100644
index ee19bfa..0000000
--- a/kernel/src/sync/arch/riscv32/atomic_lock.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-//! RISCV atomic is not currently supported by Rust.
-//! This is a ugly workaround.
-
-use core::cell::UnsafeCell;
-
-extern {
-    fn __atomic_load_4(src: *const u32) -> u32;
-    fn __atomic_store_4(dst: *mut u32, val: u32);
-    fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
-}
-
-pub struct AtomicLock
-{
-    lock: UnsafeCell<u32>
-}
-
-impl AtomicLock
-{
-    pub fn new() -> Self {
-        AtomicLock {
-            lock: UnsafeCell::new(0)
-        }
-    }
-
-    /// Returns 1 if lock is acquired
-    pub fn try_lock(&self) -> bool {
-        let mut expected: u32 = 0;
-        unsafe {
-            __atomic_compare_exchange_4(self.lock.get(), &mut expected as *mut u32, 1)
-        }
-    }
-
-    pub fn load(&self) -> bool {
-        unsafe {
-            __atomic_load_4(self.lock.get()) == 1
-        }
-    }
-
-    pub fn store(&self) {
-        unsafe {
-            __atomic_store_4(self.lock.get(), 0);
-        }
-    }
-}
diff --git a/kernel/src/sync/arch/x86_64/atomic_lock.rs b/kernel/src/sync/arch/x86_64/atomic_lock.rs
deleted file mode 100644
index 06550c8..0000000
--- a/kernel/src/sync/arch/x86_64/atomic_lock.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use core::sync::atomic::{AtomicBool, Ordering};
-
-pub struct AtomicLock
-{
-    lock: AtomicBool
-}
-
-impl AtomicLock
-{
-    pub fn new() -> AtomicLock {
-        AtomicLock {
-            lock: AtomicBool::new(false)
-        }
-    }
-
-    pub fn try_lock(&self) -> bool {
-        self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
-    }
-
-    pub fn load(&self) -> bool {
-        self.lock.load(Ordering::Relaxed)
-    }
-
-    pub fn store(&self) {
-        self.lock.store(false, Ordering::Release);
-    }
-}
-
-pub const ATOMIC_LOCK_INIT: AtomicLock = AtomicLock {
-    lock: AtomicBool::new(false)
-};
\ No newline at end of file
diff --git a/kernel/src/sync/mod.rs b/kernel/src/sync/mod.rs
index c2bae78..cfa5071 100644
--- a/kernel/src/sync/mod.rs
+++ b/kernel/src/sync/mod.rs
@@ -53,15 +53,6 @@ pub use self::condvar::*;
 pub use self::mutex::*;
 pub use self::semaphore::*;
 
-#[allow(dead_code)]
-#[cfg(target_arch = "x86_64")]
-#[path = "arch/x86_64/atomic_lock.rs"]
-pub mod atomic_lock;
-
-#[cfg(target_arch = "riscv32")]
-#[path = "arch/riscv32/atomic_lock.rs"]
-pub mod atomic_lock;
-
 mod mutex;
 mod condvar;
 mod semaphore;
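Note (not part of the patch): with the in-tree `AtomicLock` gone, call sites such as vga.rs, fs.rs, logging.rs and memory.rs import the `spin` crate's `Mutex` directly, usually behind `lazy_static!`. A minimal user-space sketch of that pattern, assuming the `spin` and `lazy_static` crates; `LOG_LOCK` and `print_line` are hypothetical names for illustration (the kernel itself is `no_std` and prints through its own writer):

```rust
#[macro_use]
extern crate lazy_static;

use spin::Mutex;

lazy_static! {
    // A lock guarding some shared resource, as logging.rs does with `log_mutex`.
    static ref LOG_LOCK: Mutex<()> = Mutex::new(());
}

fn print_line(s: &str) {
    // The lock is held for the lifetime of the guard and released on drop.
    let _guard = LOG_LOCK.lock();
    println!("{}", s);
}

fn main() {
    print_line("hello from behind spin::Mutex");
}
```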
diff --git a/kernel/src/sync/mutex.rs b/kernel/src/sync/mutex.rs
index d542e76..6cddc9c 100644
--- a/kernel/src/sync/mutex.rs
+++ b/kernel/src/sync/mutex.rs
@@ -30,8 +30,8 @@ use arch::interrupt;
 use core::cell::UnsafeCell;
 use core::fmt;
 use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{ATOMIC_BOOL_INIT, AtomicBool, Ordering};
 use super::Condvar;
-use super::atomic_lock::AtomicLock;
 
 pub type SpinLock<T> = Mutex<T, Spin>;
 pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
@@ -39,7 +39,7 @@ pub type ThreadLock<T> = Mutex<T, Condvar>;
 
 pub struct Mutex<T: ?Sized, S: MutexSupport>
 {
-    lock: AtomicLock,
+    lock: AtomicBool,
     support: S,
     data: UnsafeCell<T>,
 }
@@ -78,7 +78,7 @@ impl<T, S: MutexSupport> Mutex<T, S>
     /// ```
     pub fn new(user_data: T) -> Mutex<T, S> {
         Mutex {
-            lock: AtomicLock::new(),
+            lock: ATOMIC_BOOL_INIT,
             data: UnsafeCell::new(user_data),
             support: S::new(),
         }
     }
@@ -96,9 +96,9 @@ impl<T, S: MutexSupport> Mutex<T, S>
 impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
 {
     fn obtain_lock(&self) {
-        while !self.lock.try_lock() {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
             // Wait until the lock looks unlocked before retrying
-            while self.lock.load() {
+            while self.lock.load(Ordering::Relaxed) {
                 self.support.cpu_relax();
             }
         }
     }
@@ -137,14 +137,14 @@ impl<T: ?Sized, S: MutexSupport> Mutex<T, S>
     ///
     /// If the lock isn't held, this is a no-op.
     pub unsafe fn force_unlock(&self) {
-        self.lock.store();
+        self.lock.store(false, Ordering::Release);
     }
 
     /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
     /// a guard within Some.
     pub fn try_lock(&self) -> Option<MutexGuard<T, S>> {
         let support_guard = S::before_lock();
-        if self.lock.try_lock() {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
             Some(MutexGuard {
                 mutex: self,
                 support_guard,
@@ -174,19 +174,19 @@ impl<T: ?Sized + Default, S: MutexSupport> Default for Mutex<T, S> {
 impl<'a, T: ?Sized, S: MutexSupport> Deref for MutexGuard<'a, T, S>
 {
     type Target = T;
-    fn deref<'b>(&'b self) -> &'b T { unsafe { &*self.mutex.data.get() } }
+    fn deref(&self) -> &T { unsafe { &*self.mutex.data.get() } }
 }
 
 impl<'a, T: ?Sized, S: MutexSupport> DerefMut for MutexGuard<'a, T, S>
 {
-    fn deref_mut<'b>(&'b mut self) -> &'b mut T { unsafe { &mut *self.mutex.data.get() } }
+    fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.data.get() } }
 }
 
 impl<'a, T: ?Sized, S: MutexSupport> Drop for MutexGuard<'a, T, S>
 {
     /// The dropping of the MutexGuard will release the lock it was created from.
     fn drop(&mut self) {
-        self.mutex.lock.store();
+        self.mutex.lock.store(false, Ordering::Release);
         self.mutex.support.after_unlock();
     }
 }
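Note (not part of the patch): `obtain_lock` above is a test-and-test-and-set loop written directly against `AtomicBool`: one acquiring CAS to take the lock, then cheap relaxed loads while it is contended. A stand-alone sketch of the same loop, using `compare_exchange` (the non-deprecated equivalent of the `compare_and_swap` call in the diff) and `std::hint::spin_loop()` in place of the kernel's `cpu_relax`; `RawSpinLock` is a hypothetical type for illustration only:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

pub struct RawSpinLock {
    lock: AtomicBool,
}

impl RawSpinLock {
    pub const fn new() -> Self {
        RawSpinLock { lock: AtomicBool::new(false) }
    }

    pub fn lock(&self) {
        // Try to flip false -> true; on failure, spin on cheap relaxed loads
        // until the lock looks free, then attempt the expensive CAS again.
        while self
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            while self.lock.load(Ordering::Relaxed) {
                std::hint::spin_loop();
            }
        }
    }

    pub fn unlock(&self) {
        // Release ordering publishes the critical section's writes to the next owner.
        self.lock.store(false, Ordering::Release);
    }
}

fn main() {
    let l = RawSpinLock::new();
    l.lock();
    // ... critical section ...
    l.unlock();
}
```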