Switch to RV64 GNU toolchain. Simplify compiler_rt.

master
WangRunji 6 years ago
parent f27fd37d82
commit 72e92c07f9

@ -18,7 +18,7 @@
arch ?= riscv32
mode ?= debug
LOG ?= debug
smp ?= 1
smp ?= 4
target := $(arch)-blog_os
kernel := target/$(target)/$(mode)/ucore
@ -71,13 +71,12 @@ ifeq ($(uname), Darwin)
prefix := x86_64-elf-
endif
ifeq ($(arch), riscv32)
prefix := riscv32-unknown-elf-
prefix := riscv64-unknown-elf-
endif
ld := $(prefix)ld
objdump := $(prefix)objdump
cc := $(prefix)gcc
CC := $(cc)
as := $(prefix)as
.PHONY: all clean run build asm doc justrun kernel
@ -107,9 +106,12 @@ endif
asm:
@$(objdump) -dS $(kernel) | less
elf-h:
header:
@$(objdump) -h $(kernel)
sym:
@$(objdump) -t $(kernel) | less
$(bin): kernel
ifdef board
@cp $(kernel) $@
@ -121,7 +123,7 @@ else
--enable-32bit \
--enable-logo \
--disable-fp-emulation \
--host=riscv32-unknown-elf \
--host=riscv64-unknown-elf \
--with-payload=$(abspath $(kernel)) && \
make && \
cp bbl ../../kernel/$@
@ -131,7 +133,7 @@ kernel:
ifeq ($(arch), x86_64)
@bootimage build $(build_args)
else
@cargo xbuild $(build_args)
@CC=$(cc) cargo xbuild $(build_args)
endif
# make user.o from binary files

@ -15,7 +15,8 @@ fn main() {
if std::env::var("TARGET").unwrap().find("riscv32").is_some() {
cc::Build::new()
.file("src/arch/riscv32/compiler_rt.c")
.flag("-march=rv32ima")
.flag("-march=rv32ia")
.flag("-mabi=ilp32")
.compile("atomic_rt");
}
}

@ -9,7 +9,7 @@
"cpu": "generic-rv32",
"features": "",
"max-atomic-width": "32",
"linker": "riscv32-unknown-elf-ld",
"linker": "riscv64-unknown-elf-ld",
"linker-flavor": "ld",
"pre-link-args": {
"ld": [

@ -1,40 +1,32 @@
// http://llvm.org/docs/Atomics.html#libcalls-atomic
// fn __atomic_load_1_workaround(src: *const u8) -> u8;
// fn __atomic_load_2_workaround(src: *const u16) -> u16;
// fn __atomic_load_4_workaround(src: *const u32) -> u32;
// fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
// fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
// fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
// fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
char __atomic_load_1_workaround(char *src) {
char __atomic_load_1(char *src) {
char res = 0;
__asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
return res;
}
short __atomic_load_2_workaround(short *src) {
short __atomic_load_2(short *src) {
short res = 0;
__asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
return res;
}
int __atomic_load_4_workaround(int *src) {
int __atomic_load_4(int *src) {
int res = 0;
__asm__ __volatile__("amoadd.w.rl %0, zero, (%1)" : "=r"(res) : "r"(src) : "memory");
return res;
}
char __atomic_store_1_workaround(char *dst, char val) {
char __atomic_store_1(char *dst, char val) {
__asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
}
int __atomic_store_4_workaround(int *dst, int val) {
int __atomic_store_4(int *dst, int val) {
__asm__ __volatile__("amoswap.w.aq zero, %0, (%1)" :: "r"(val), "r"(dst) : "memory");
}
char __atomic_compare_exchange_1_workaround(char* dst, char* expected, char desired) {
char __atomic_compare_exchange_1(char* dst, char* expected, char desired) {
char val = 0;
__asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
if (val == *expected) {
@ -45,7 +37,7 @@ char __atomic_compare_exchange_1_workaround(char* dst, char* expected, char desi
return 0;
}
char __atomic_compare_exchange_4_workaround(int* dst, int* expected, int desired) {
char __atomic_compare_exchange_4(int* dst, int* expected, int desired) {
int val = 0;
__asm__ __volatile__("lr.w %0, (%1)" : "=r"(val) : "r"(dst) : "memory");
if (val == *expected) {

@ -2,17 +2,6 @@
//!
//! [atomic](http://llvm.org/docs/Atomics.html#libcalls-atomic)
#[link(name = "atomic_rt")]
extern {
fn __atomic_load_1_workaround(src: *const u8) -> u8;
fn __atomic_load_2_workaround(src: *const u16) -> u16;
fn __atomic_load_4_workaround(src: *const u32) -> u32;
fn __atomic_store_1_workaround(dst: *mut u8, val: u8);
fn __atomic_store_4_workaround(dst: *mut u32, val: u32);
fn __atomic_compare_exchange_1_workaround(dst: *mut u8, expected: *mut u8, desired: u8) -> bool;
fn __atomic_compare_exchange_4_workaround(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
}
/// Copy from:
/// https://github.com/rust-lang-nursery/compiler-builtins/blob/master/src/riscv32.rs
#[no_mangle]
@ -34,58 +23,3 @@ pub extern fn __mulsi3(mut a: u32, mut b: u32) -> u32 {
pub extern fn abort() {
loop {}
}
use core::ptr::{read, write};
#[no_mangle]
pub unsafe extern fn __atomic_load_1(src: *const u8) -> u8 {
__atomic_load_1_workaround(src)
}
#[no_mangle]
pub unsafe extern fn __atomic_load_2(src: *const u16) -> u16 {
__atomic_load_2_workaround(src)
}
#[no_mangle]
pub unsafe extern fn __atomic_load_4(src: *const u32) -> u32 {
__atomic_load_4_workaround(src)
}
#[no_mangle]
pub unsafe extern fn __atomic_store_1(dst: *mut u8, val: u8) {
__atomic_store_1_workaround(dst, val);
}
#[no_mangle]
pub unsafe extern fn __atomic_store_4(dst: *mut u32, val: u32) {
__atomic_store_4_workaround(dst, val);
}
// unsafe fn __atomic_compare_exchange<T: PartialEq>(dst: *mut T, expected: *mut T, desired: T) -> bool {
// // use super::interrupt;
// // let flags = interrupt::disable_and_store();
// // let val = read(dst);
// // let success = val == read(expected);
// // write(dst, if success {desired} else {val});
// // interrupt::restore(flags);
// // success
// // let mut val: T;
// // asm!("lr.w $0, ($1)" : "=r"(val) : "r"(dst) : "memory" : "volatile");
// // if val == *expected {
// // let mut sc_ret = 0;
// // asm!("sc.w $0, $1, ($2)" : "=r"(sc_ret) : "r"(desired), "r"(dst) : "memory" : "volatile");
// // return sc_ret == 0
// // }
// false
// }
#[no_mangle]
pub unsafe extern fn __atomic_compare_exchange_1(dst: *mut u8, expected: *mut u8, desired: u8) -> bool {
__atomic_compare_exchange_1_workaround(dst, expected, desired)
}
#[no_mangle]
pub unsafe extern fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool {
__atomic_compare_exchange_4_workaround(dst, expected, desired)
}

@ -1,10 +1,14 @@
//! RISC-V atomics are not currently supported by Rust.
//! This is an ugly workaround.
use arch::compiler_rt::{__atomic_compare_exchange_4, __atomic_store_4, __atomic_load_4};
use core::cell::UnsafeCell;
extern {
fn __atomic_load_4(src: *const u32) -> u32;
fn __atomic_store_4(dst: *mut u32, val: u32);
fn __atomic_compare_exchange_4(dst: *mut u32, expected: *mut u32, desired: u32) -> bool;
}
pub struct AtomicLock
{
lock: UnsafeCell<u32>

@ -1,14 +1,15 @@
use core::sync::atomic::{AtomicBool, Ordering};
pub struct AtomicLock
{
lock: usize
lock: AtomicBool
}
impl AtomicLock
{
pub fn new() -> AtomicLock {
AtomicLock {
lock: ATOMIC_BOOL_INIT
lock: AtomicBool::new(false)
}
}
@ -26,5 +27,5 @@ impl AtomicLock
}
pub const ATOMIC_LOCK_INIT: AtomicLock = AtomicLock {
lock: ATOMIC_BOOL_INIT
lock: AtomicBool::new(false)
};

@ -4084,8 +4084,8 @@ fi
case "${BUILD_32BIT}" in
yes|default)
echo "Building 32-bit pk"
CFLAGS="$default_CFLAGS -march=rv32ia -mabi=ilp32"
LDFLAGS="-march=rv32ia -mabi=ilp32"
CFLAGS="$default_CFLAGS -march=rv32iac -mabi=ilp32"
LDFLAGS="-march=rv32iac -mabi=ilp32"
install_subdir="riscv32-unknown-elf"
;;
*)

@ -88,8 +88,8 @@ AC_ARG_ENABLE([32bit],
case "${BUILD_32BIT}" in
yes|default)
echo "Building 32-bit pk"
CFLAGS="$default_CFLAGS -march=rv32ia -mabi=ilp32"
LDFLAGS="-march=rv32ia -mabi=ilp32"
CFLAGS="$default_CFLAGS -march=rv32iac -mabi=ilp32"
LDFLAGS="-march=rv32iac -mabi=ilp32"
install_subdir="riscv32-unknown-elf"
;;
*)

Loading…
Cancel
Save