@@ -1,40 +1,32 @@
 // http://llvm.org/docs/Atomics.html#libcalls-atomic

-// fn __atomic_load_1_workaround(src: *const u8) -> u8;
-// fn __atomic_load_2_workaround(src: *const u16) -> u16;
-// fn __atomic_load_4_workaround(src: *const u32) -> u32;
-// fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
-// fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
-// fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
-// fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
-
-char __atomic_load_1_workaround(char *src) {
+char __atomic_load_1(char *src) {
     char res = 0;
     __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
     return res;
 }

-short __atomic_load_2_workaround(short *src) {
+short __atomic_load_2(short *src) {
     short res = 0;
     __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
     return res;
 }

-int __atomic_load_4_workaround(int *src) {
+int __atomic_load_4(int *src) {
     int res = 0;
     __asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
     return res;
 }

-char __atomic_store_1_workaround(char *dst, char val) {
+char __atomic_store_1(char *dst, char val) {
     __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
 }

-int __atomic_store_4_workaround(int *dst, int val) {
+int __atomic_store_4(int *dst, int val) {
     __asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
 }

-char __atomic_compare_exchange_1_workaround(char* dst, char* expected, char desired) {
+char __atomic_compare_exchange_1(char* dst, char* expected, char desired) {
     char val = 0;
     __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
     if (val == *expected) {
@@ -45,7 +37,7 @@ char __atomic_compare_exchange_1_workaround(char* dst, char* expected, char desired) {
     return 0;
 }

-char __atomic_compare_exchange_4_workaround(int* dst, int* expected, int desired) {
+char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
     int val = 0;
     __asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
     if (val == *expected) {
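
For reference, the removed "// fn ..._workaround" comments spell out the Rust-side signatures these C routines were previously bound to. A minimal sketch of that old binding, assuming a plain extern "C" block was used (illustrative only, not part of the diff):

    // Rust externs mirroring the removed signature comments (pre-rename names).
    extern "C" {
        fn __atomic_load_1_workaround(src: *const u8) -> u8;
        fn __atomic_load_2_workaround(src: *const u16) -> u16;
        fn __atomic_load_4_workaround(src: *const u32) -> u32;
        fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
        fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
        fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
        fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
    }

With the rename, the C functions carry the standard atomic libcall names documented at the URL in the header comment, so calls the compiler lowers to __atomic_load_4 and friends can presumably resolve to these implementations at link time without explicit extern declarations.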