biscuit: support aarch64

Branch: master
Author: equation314, 7 years ago
Parent: d84f21465c
Commit: ddede19b61

@@ -40,7 +40,7 @@ ifneq ($(arch), x86_64)
 endif

 biscuit:
-ifeq ($(arch), x86_64)
+ifeq ($(arch), $(filter $(arch), x86_64 aarch64))
 	@echo Building biscuit programs
 	@mkdir -p biscuit/build
 	@cd biscuit/build && cmake $(cmake_build_args) .. && make

@@ -14,12 +14,12 @@ if (${ARCH} STREQUAL i386)
     if(APPLE)
         set(PREFIX i386-elf-)
     endif ()
-    set(CMAKE_C_FLAGS "-m32")
+    set(CMAKE_C_FLAGS "-m32 -mno-red-zone")
 elseif (${ARCH} STREQUAL x86_64)
     if(APPLE)
         set(PREFIX x86_64-elf-)
     endif ()
-    set(CMAKE_C_FLAGS "-m64")
+    set(CMAKE_C_FLAGS "-m64 -mno-red-zone")
 elseif (${ARCH} STREQUAL riscv32)
     set(PREFIX riscv64-unknown-elf-)
     set(CMAKE_C_FLAGS "-march=rv32imac -mabi=ilp32 -mcmodel=medany")
@@ -28,7 +28,6 @@ elseif (${ARCH} STREQUAL riscv64)
     set(CMAKE_C_FLAGS "-march=rv64imac -mabi=lp64 -mcmodel=medany")
 elseif (${ARCH} STREQUAL aarch64)
     set(PREFIX aarch64-none-elf-)
-    set(CMAKE_C_FLAGS "-mgeneral-regs-only")
     set(LINK_FLAGS "-Ttext 0xffff000000000000")
 else()
     message("Unsupported arch: ${ARCH}")
@@ -36,7 +35,7 @@ endif ()
 set(CMAKE_ASM_COMPILER ${PREFIX}gcc)
 set(CMAKE_C_COMPILER ${PREFIX}gcc)
 set(CMAKE_RANLIB ${PREFIX}ranlib)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -MMD -MP -O -g -ffreestanding -nostdlib -nostdinc -fno-builtin -mno-red-zone -fno-stack-protector -fPIC -std=gnu11")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Werror -MMD -MP -O -g -ffreestanding -nostdlib -nostdinc -fno-builtin -fno-stack-protector -fPIC -std=gnu11")
 set(CMAKE_ASM_FLAGS ${CMAKE_C_FLAGS})
 set(CMAKE_C_LINK_FLAGS "${LINK_FLAGS} -nostdlib") # override default value to get rid of '-Wl,-search_paths_first -Wl,-headerpad_max_install_names'
 set(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS) # override default value to get rid of '-rdynamic' on Linux
@@ -48,5 +47,5 @@ add_library(ulib ${LIBS})
 foreach (PATH ${SRCS})
     get_filename_component(NAME ${PATH} NAME_WE)
     add_executable(${NAME} ${PATH})
-    target_link_libraries(${NAME} ulib)
+    target_link_libraries(${NAME} ulib gcc)
 endforeach ()

@@ -22,6 +22,7 @@ struct timeval before, end;
 void rgettimeofday(struct timeval *h, void *p)
 {
+#if defined(__x86_64__)
 	time_t hi, lo, v;
 	asm(
 		"rdtsc\n"
@@ -31,6 +32,9 @@ void rgettimeofday(struct timeval *h, void *p)
 	v = hi << 32 | lo;
 	if (h)
 		h->tv_usec = v;
+#else
+	// TODO: aarch64
+#endif
 }

 void start()

@@ -86,13 +86,20 @@ static struct kinfo_t *kinfo;
 // stack is not used after munmapping it, but before calling exit(2). we use
 // this macro to make sure the clobbers are coherent for these three pieces of
 // code using syscalls.
+#if defined(__x86_64__)
 #define SYSCALL_CLOBBERS "cc", "memory", "r9", "r10", "r11", "r12", "r13", \
     "r14", "r15"
+#elif defined(__aarch64__)
+#define SYSCALL_CLOBBERS "cc", "memory"
+#endif
 long
 syscall(long a1, long a2, long a3, long a4,
     long a5, long trap)
 {
 	long ret;
+#if defined(__x86_64__)
 	register long r8 asm("r8") = a5;

 	// we may want to follow the sys5 abi and have the kernel restore
@@ -104,6 +111,20 @@ syscall(long a1, long a2, long a3, long a4,
 	    : "=a"(ret)
 	    : "0"(trap), "D"(a1), "S"(a2), "d"(a3), "c"(a4), "r"(r8)
 	    : SYSCALL_CLOBBERS);
+#elif defined(__aarch64__)
+	register long x8 asm("x8") = trap;
+	register long x0 asm("x0") = a1;
+	register long x1 asm("x1") = a2;
+	register long x2 asm("x2") = a3;
+	register long x3 asm("x3") = a4;
+	register long x4 asm("x4") = a5;
+	asm volatile(
+		"svc 0"
+		: "=r"(ret)
+		: "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
+		: SYSCALL_CLOBBERS);
+#endif
 	return ret;
 }
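
Note (not part of the commit): the aarch64 convention used above is that the syscall number goes in x8, arguments in x0-x4, `svc 0` traps into the kernel, and the result comes back in x0. A minimal standalone sketch of that pattern, with a hypothetical helper name and an illustrative clobber list:

// sketch only: one-argument syscall via svc on aarch64
static inline long svc_call1(long nr, long a1)
{
	register long x8 asm("x8") = nr;	// syscall number
	register long x0 asm("x0") = a1;	// argument 0, reused for the return value
	asm volatile("svc 0"
	             : "+r"(x0)			// kernel writes the result into x0
	             : "r"(x8)
	             : "cc", "memory");
	return x0;
}
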
@@ -837,6 +858,7 @@ tfork_thread(struct tfork_t *args, long (*fn)(void *), void *fnarg)
 	int tid;
 	long flags = FORK_THREAD;
+#if defined(__x86_64__)
 	// rbx and rbp are preserved across syscalls. i don't know how to
 	// specify rbp as a register contraint.
 	register ulong rbp asm("rbp") = (ulong)fn;
@@ -859,6 +881,30 @@ tfork_thread(struct tfork_t *args, long (*fn)(void *), void *fnarg)
 	    : "=a"(tid)
 	    : "D"(args), "S"(flags), "0"(SYS_FORK), "r"(rbp), "r"(rbx)
 	    : SYSCALL_CLOBBERS);
+#elif defined(__aarch64__)
+	// all registers are preserved across syscalls for aarch64.
+	register ulong x8 asm("x8") = SYS_FORK;
+	register ulong x0 asm("x0") = (ulong)args;
+	register ulong x1 asm("x1") = flags;
+	asm volatile(
+		"svc 0\n"
+		"cmp x0, #0\n"
+		// parent or error
+		"b.ne 1f\n"
+		// child
+		"ldr x0, %5\n"
+		"ldr x9, %4\n"
+		"blr x9\n"
+		"bl tfork_done\n"
+		"mov x0, #0\n"
+		"str xzr, [x0]\n"
+		"1:\n"
+		: "=r"(tid)
+		: "r"(x8), "0"(x0), "r"(x1), "m"(fn), "m"(fnarg)
+		: SYSCALL_CLOBBERS);
+#endif
 	return tid;
 }
@@ -907,6 +953,7 @@ _pcreate(void *vpcarg)
 	status = (long)(pcargs.fn(pcargs.arg));
 	free(pcargs.tls);
+#if defined(__x86_64__)
 	// rbx and rbp are preserved across syscalls. i don't know how to
 	// specify rbp as a register contraint.
 	register ulong rbp asm("rbp") = SYS_THREXIT;
@@ -929,6 +976,29 @@ _pcreate(void *vpcarg)
 	    : "a"(SYS_MUNMAP), "D"(pcargs.stack), "S"(pcargs.stksz),
 	      "r"(rbp), "r"(rbx)
 	    : SYSCALL_CLOBBERS);
+#elif defined(__aarch64__)
+	register ulong x8 asm("x8") = SYS_MUNMAP;
+	register ulong x0 asm("x0") = (ulong)pcargs.stack;
+	register ulong x1 asm("x1") = (ulong)pcargs.stksz;
+	asm volatile(
+		"svc 0\n"
+		"cmp x0, #0\n"
+		"b.eq 1f\n"
+		"mov x0, #0\n"
+		"str xzr, [x0]\n"
+		"1:\n"
+		"mov x8, %3\n"
+		"ldr x0, %4\n"
+		"svc 0\n"
+		"mov x0, #1\n"
+		"str xzr, [x0]\n"
+		:
+		: "r"(x8), "r"(x0), "r"(x1),
+		  "X"(SYS_THREXIT), "m"(status)
+		: SYSCALL_CLOBBERS);
+#endif
 	// not reached
 	return 0;
 }
@@ -1107,7 +1177,11 @@ pthread_barrier_wait(pthread_barrier_t *b)
 		uint o = b->current;
 		uint n = o + 1;
 		if ((o & m) != 0) {
+#if defined(__x86_64__)
 			asm volatile("pause":::"memory");
+#elif defined(__aarch64__)
+			asm volatile("yield":::"memory");
+#endif
 			continue;
 		}
 		c = n;
@@ -1126,7 +1200,11 @@ pthread_barrier_wait(pthread_barrier_t *b)
 	}

 	while ((b->current & m) == 0)
+#if defined(__x86_64__)
 		asm volatile("pause":::"memory");
+#elif defined(__aarch64__)
+		asm volatile("yield":::"memory");
+#endif

 	c = __sync_add_and_fetch(&b->current, -1);
 	if (c == m)
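
Note (not part of the commit): the `pause`/`yield` pairs that this change scatters through the spin-wait loops could be factored into one helper. A sketch, with the name cpu_relax() chosen here for illustration only:

// sketch only: per-architecture spin-wait hint
static inline void cpu_relax(void)
{
#if defined(__x86_64__)
	asm volatile("pause" ::: "memory");	// tell the CPU we are busy-waiting
#elif defined(__aarch64__)
	asm volatile("yield" ::: "memory");	// aarch64 equivalent hint
#else
	asm volatile("" ::: "memory");		// plain compiler barrier elsewhere
#endif
}
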
@@ -2785,6 +2863,7 @@ sscanf(const char *src, const char *fmt, ...)
 ulong
 rdtsc(void)
 {
+#if defined(__x86_64__)
 	ulong low, hi;
 	asm volatile(
 		"rdtsc\n"
@@ -2792,6 +2871,10 @@ rdtsc(void)
 	    :
 	    :);
 	return hi << 32 | low;
+#else
+	// TODO: aarch64
+	return 0;
+#endif
 }

 static char readlineb[256];
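
Note (not part of the commit): the `// TODO: aarch64` branch returns 0 for now. One possible way to fill it later is to read the generic timer's virtual count register; this sketch assumes the kernel enables EL0 access to the counter (CNTKCTL_EL1.EL0VCTEN) and is not biscuit code:

// sketch only: cycle-counter analogue of rdtsc() on aarch64
unsigned long
rdcycle(void)
{
#if defined(__aarch64__)
	unsigned long cnt;
	asm volatile("mrs %0, cntvct_el0" : "=r"(cnt));	// virtual count register
	return cnt;
#else
	return 0;
#endif
}
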
@@ -3534,6 +3617,7 @@ __start(int argc, char **argv, struct kinfo_t *k)
 void
 _start(void)
 {
+#if defined(__x86_64__)
 	// make sure that the stack is 16-byte aligned, as gcc assumes, after
 	// _start's function prologue. gcc emits SSE instructions that require
 	// 16-byte alignment (misalignment generates #GP).
@@ -3545,6 +3629,13 @@ _start(void)
 	    "movabs $__start, %%rax\n"
 	    "jmpq *%%rax\n"
 	    ::: "memory", "cc");
+#elif defined(__aarch64__)
+	asm(
+		"ldr x0, [sp]\n"	// argc
+		"add x1, sp, #8\n"	// argv
+		"bl __start\n"
+		::: "memory", "cc");
+#endif
 }

 /* NGINX STUFF */

@@ -48,7 +48,11 @@ void *groupfault(void *a)
 	int b = (int)(long)a;
 	while (!go)
-		asm volatile("pause");
+#if defined(__x86_64__)
+		asm volatile("pause":::"memory");
+#elif defined(__aarch64__)
+		asm volatile("yield":::"memory");
+#endif
 	blah = b;

@@ -118,6 +118,7 @@ pid_t
 _getppid(void)
 {
 	pid_t ret;
+#if defined(__x86_64__)
 	asm volatile(
 	    "movq %%rsp, %%r10\n"
 	    "leaq 2(%%rip), %%r11\n"
@@ -125,6 +126,14 @@ _getppid(void)
 	    : "=a"(ret)
 	    : "0"(40ul)
 	    : "cc", "memory", "r9", "r10", "r11", "edi", "esi", "edx", "ecx", "r8");
+#elif defined(__aarch64__)
+	register long x8 asm("x8") = 40ul;
+	asm volatile(
+		"svc 0"
+		: "=r"(ret)
+		: "r"(x8)
+		: "cc", "memory");
+#endif
 	return ret;
 }
@@ -135,6 +144,7 @@ void *igetpids(void *idp)
 	long total = 0;
 	while (!cease) {
+#if defined(__x86_64__)
 		asm volatile(
 		    "movl $40, %%eax\n"
 		    "movq %%rsp, %%r10\n"
@@ -143,6 +153,14 @@ void *igetpids(void *idp)
 		    :
 		    :
 		    : SYSCALL_CLOBBERS, "eax", "edi", "esi", "edx", "ecx", "r8");
+#elif defined(__aarch64__)
+		asm volatile(
+			"mov x8, #40\n"
+			"svc 0"
+			:
+			:
+			: "cc", "memory");
+#endif
 		total++;
 	}
 	return (void *)total;
@@ -692,10 +710,14 @@ void *locks(void *_arg)
 	pthread_barrier_wait(&bar);
 	while (!cease) {
+#if defined(__x86_64__)
 		asm("lock incq %0\n"
 		    :
 		    : "m"(tot)
 		    : "cc", "memory");
+#else
+		// TODO: aarch64
+#endif
 	}
 	return (void *)tot;
 }
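
Note (not part of the commit): on aarch64 the `// TODO` branch above leaves the loop body empty. The x86 `lock incq` could be mirrored portably with the GCC atomic builtin, which emits an exclusive load/store loop (or an LSE atomic) on aarch64. A sketch under that assumption:

// sketch only: portable counterpart of "lock incq %0"
static inline void atomic_inc(volatile long *p)
{
	__atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);	// atomic read-modify-write increment
}
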

@@ -1643,14 +1643,26 @@ void
 validateint(int *p)
 {
 	ulong ret;
+#define SYS_PIPE2 293
+#if defined(__x86_64__)
 	asm volatile(
 	    "movq %%rsp, %%r10\n"
 	    "leaq 2(%%rip), %%r11\n"
 	    "sysenter\n"
 	    : "=a"(ret)
-#define SYS_PIPE2 293
 	    : "0"(SYS_PIPE2), "D"(p)
 	    : "cc", "memory");
+#elif defined(__aarch64__)
+	register ulong x8 asm("x8") = SYS_PIPE2;
+	register ulong x0 asm("x0") = (ulong)p;
+	asm volatile(
+		"svc 0\n"
+		: "=r"(ret)
+		: "r"(x8), "0"(x0)
+		: "cc", "memory");
+#endif
 	if (ret == 0)
 		errx(-1, "bad int passed?");
 }
@@ -2789,7 +2801,11 @@ static volatile int go;
 static void *_locker(void *v)
 {
 	while (go != 1)
+#if defined(__x86_64__)
 		asm volatile("pause\n":::"memory");
+#elif defined(__aarch64__)
+		asm volatile("yield\n":::"memory");
+#endif
 	pthread_mutex_t *m = (pthread_mutex_t *)v;
 	int i;
 	for (i = 0; i < ltimes; i++) {
@@ -2904,7 +2920,11 @@ static void _condtest(const int nt)
 		if (pthread_create(&t[i], NULL, _condsleep, &args[i]))
 			errx(-1, "pthread_ create");
 		while (go == 0)
+#if defined(__x86_64__)
 			asm volatile("pause\n":::"memory");
+#elif defined(__aarch64__)
+			asm volatile("yield\n":::"memory");
+#endif
 	}

 	for (i = 0; i < nt; i++)
@@ -2953,7 +2973,11 @@ static void _condbctest(const int nt)
 	for (i = 0; i < bctimes; i++) {
 		volatile int *p = &lcounter;
 		while (*p < enext)
+#if defined(__x86_64__)
 			asm volatile("pause\n":::"memory");
+#elif defined(__aarch64__)
+			asm volatile("yield\n":::"memory");
+#endif
 		if (pthread_mutex_lock(&m))
 			err(-1, "lock");
 		if (i == bctimes - 1)

@@ -101,6 +101,7 @@ setaffinity(int c)
 ulong
 rdtsc(void)
 {
+#if defined(__x86_64__)
 	ulong low, hi;
 	asm volatile(
 		"rdtsc\n"
@@ -108,5 +109,9 @@ rdtsc(void)
 		:
 		:);
 	return hi << 32 | low;
+#else
+	// TODO: aarch64
+	return 0;
+#endif
 }
 #endif

@@ -39,7 +39,11 @@ public:
 	{
 		// Wait if the barrier is in the exit phase
 		while (entered_ & phase_mask)
+#if defined(__x86_64__)
 			asm volatile("pause":::);
+#elif defined(__aarch64__)
+			asm volatile("yield":::);
+#endif

 		// Enter the barrier
 		auto v = ++entered_;
@@ -52,7 +56,11 @@ public:
 		// Wait until the barrier switches to the exit phase
 		while (!(entered_.load(std::memory_order_relaxed) & phase_mask))
+#if defined(__x86_64__)
 			asm volatile("pause":::);
+#elif defined(__aarch64__)
+			asm volatile("yield":::);
+#endif

 		// Exit the batter
 		if ((v = --entered_) == phase_mask)
