Fix register size (4->XLEN) in trap handling.

toolchain_update
dzy 6 years ago
parent 45b91fbcad
commit 98c94a0d83
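The change replaces the hard-coded 4-byte register slot in the trap frame with the assembler symbol XLENB (XLEN in bytes), so the same save/restore code computes correct offsets on both RV32 and RV64. A minimal Rust sketch of that offset arithmetic, for illustration only (not part of this commit):

    // Illustration only: a trap frame of 36 register-wide slots, where each
    // slot's byte offset is its index times XLENB (4 on RV32, 8 on RV64).
    const XLENB: usize = core::mem::size_of::<usize>();

    fn main() {
        let frame_size = 36 * XLENB;    // mirrors `addi sp, sp, -36 * XLENB`
        let sstatus_off = 32 * XLENB;   // mirrors `sw s1, 32*XLENB(sp)`
        println!("frame = {frame_size} bytes, sstatus at +{sstatus_off}");
    }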

@@ -17,39 +17,39 @@ _restore_kernel_sp:
     # sscratch = previous-sp, sp = kernel-sp
 _save_context:
     # provide room for trap frame
-    addi sp, sp, -36 * 4
+    addi sp, sp, -36 * XLENB
     # save x registers except x2 (sp)
-    sw x1, 1*4(sp)
+    sw x1, 1*XLENB(sp)
-    sw x3, 3*4(sp)
+    sw x3, 3*XLENB(sp)
     # tp(x4) = hartid. DON'T change.
-    # sw x4, 4*4(sp)
+    # sw x4, 4*XLENB(sp)
-    sw x5, 5*4(sp)
+    sw x5, 5*XLENB(sp)
-    sw x6, 6*4(sp)
+    sw x6, 6*XLENB(sp)
-    sw x7, 7*4(sp)
+    sw x7, 7*XLENB(sp)
-    sw x8, 8*4(sp)
+    sw x8, 8*XLENB(sp)
-    sw x9, 9*4(sp)
+    sw x9, 9*XLENB(sp)
-    sw x10, 10*4(sp)
+    sw x10, 10*XLENB(sp)
-    sw x11, 11*4(sp)
+    sw x11, 11*XLENB(sp)
-    sw x12, 12*4(sp)
+    sw x12, 12*XLENB(sp)
-    sw x13, 13*4(sp)
+    sw x13, 13*XLENB(sp)
-    sw x14, 14*4(sp)
+    sw x14, 14*XLENB(sp)
-    sw x15, 15*4(sp)
+    sw x15, 15*XLENB(sp)
-    sw x16, 16*4(sp)
+    sw x16, 16*XLENB(sp)
-    sw x17, 17*4(sp)
+    sw x17, 17*XLENB(sp)
-    sw x18, 18*4(sp)
+    sw x18, 18*XLENB(sp)
-    sw x19, 19*4(sp)
+    sw x19, 19*XLENB(sp)
-    sw x20, 20*4(sp)
+    sw x20, 20*XLENB(sp)
-    sw x21, 21*4(sp)
+    sw x21, 21*XLENB(sp)
-    sw x22, 22*4(sp)
+    sw x22, 22*XLENB(sp)
-    sw x23, 23*4(sp)
+    sw x23, 23*XLENB(sp)
-    sw x24, 24*4(sp)
+    sw x24, 24*XLENB(sp)
-    sw x25, 25*4(sp)
+    sw x25, 25*XLENB(sp)
-    sw x26, 26*4(sp)
+    sw x26, 26*XLENB(sp)
-    sw x27, 27*4(sp)
+    sw x27, 27*XLENB(sp)
-    sw x28, 28*4(sp)
+    sw x28, 28*XLENB(sp)
-    sw x29, 29*4(sp)
+    sw x29, 29*XLENB(sp)
-    sw x30, 30*4(sp)
+    sw x30, 30*XLENB(sp)
-    sw x31, 31*4(sp)
+    sw x31, 31*XLENB(sp)
     # get sp, sstatus, sepc, stval, scause
     # set sscratch = 0
@@ -59,20 +59,20 @@ _save_context:
     csrr s3, (xtval)
     csrr s4, (xcause)
     # store sp, sstatus, sepc, sbadvaddr, scause
-    sw s0, 2*4(sp)
+    sw s0, 2*XLENB(sp)
-    sw s1, 32*4(sp)
+    sw s1, 32*XLENB(sp)
-    sw s2, 33*4(sp)
+    sw s2, 33*XLENB(sp)
-    sw s3, 34*4(sp)
+    sw s3, 34*XLENB(sp)
-    sw s4, 35*4(sp)
+    sw s4, 35*XLENB(sp)
 .endm
 .macro RESTORE_ALL
-    lw s1, 32*4(sp)             # s1 = sstatus
+    lw s1, 32*XLENB(sp)         # s1 = sstatus
-    lw s2, 33*4(sp)             # s2 = sepc
+    lw s2, 33*XLENB(sp)         # s2 = sepc
     andi s0, s1, 1 << 8
     bnez s0, _restore_context   # back to S-mode? (sstatus.SPP = 1)
 _save_kernel_sp:
-    addi s0, sp, 36*4
+    addi s0, sp, 36*XLENB
     csrw (xscratch), s0         # sscratch = kernel-sp
 _restore_context:
     # restore sstatus, sepc
@@ -80,38 +80,38 @@ _restore_context:
     csrw (xepc), s2
     # restore x registers except x2 (sp)
-    lw x1, 1*4(sp)
+    lw x1, 1*XLENB(sp)
-    lw x3, 3*4(sp)
+    lw x3, 3*XLENB(sp)
-    # lw x4, 4*4(sp)
+    # lw x4, 4*XLENB(sp)
-    lw x5, 5*4(sp)
+    lw x5, 5*XLENB(sp)
-    lw x6, 6*4(sp)
+    lw x6, 6*XLENB(sp)
-    lw x7, 7*4(sp)
+    lw x7, 7*XLENB(sp)
-    lw x8, 8*4(sp)
+    lw x8, 8*XLENB(sp)
-    lw x9, 9*4(sp)
+    lw x9, 9*XLENB(sp)
-    lw x10, 10*4(sp)
+    lw x10, 10*XLENB(sp)
-    lw x11, 11*4(sp)
+    lw x11, 11*XLENB(sp)
-    lw x12, 12*4(sp)
+    lw x12, 12*XLENB(sp)
-    lw x13, 13*4(sp)
+    lw x13, 13*XLENB(sp)
-    lw x14, 14*4(sp)
+    lw x14, 14*XLENB(sp)
-    lw x15, 15*4(sp)
+    lw x15, 15*XLENB(sp)
-    lw x16, 16*4(sp)
+    lw x16, 16*XLENB(sp)
-    lw x17, 17*4(sp)
+    lw x17, 17*XLENB(sp)
-    lw x18, 18*4(sp)
+    lw x18, 18*XLENB(sp)
-    lw x19, 19*4(sp)
+    lw x19, 19*XLENB(sp)
-    lw x20, 20*4(sp)
+    lw x20, 20*XLENB(sp)
-    lw x21, 21*4(sp)
+    lw x21, 21*XLENB(sp)
-    lw x22, 22*4(sp)
+    lw x22, 22*XLENB(sp)
-    lw x23, 23*4(sp)
+    lw x23, 23*XLENB(sp)
-    lw x24, 24*4(sp)
+    lw x24, 24*XLENB(sp)
-    lw x25, 25*4(sp)
+    lw x25, 25*XLENB(sp)
-    lw x26, 26*4(sp)
+    lw x26, 26*XLENB(sp)
-    lw x27, 27*4(sp)
+    lw x27, 27*XLENB(sp)
-    lw x28, 28*4(sp)
+    lw x28, 28*XLENB(sp)
-    lw x29, 29*4(sp)
+    lw x29, 29*XLENB(sp)
-    lw x30, 30*4(sp)
+    lw x30, 30*XLENB(sp)
-    lw x31, 31*4(sp)
+    lw x31, 31*XLENB(sp)
     # restore sp last
-    lw x2, 2*4(sp)
+    lw x2, 2*XLENB(sp)
 .endm
 .section .text

@@ -49,6 +49,7 @@ int __atomic_fetch_sub_4(int* ptr, int val) {
     return res;
 }
+#ifdef TARGET_IS_64BITS
 typedef unsigned long long u64;
 u64 __atomic_load_8(u64 *src) {
@@ -87,3 +88,4 @@ u64 __atomic_fetch_sub_8(u64* ptr, u64 val) {
     __asm__ __volatile__("amoadd.d.rl %0, %1, (%2)" : "=r"(res) : "r"(-val), "r"(ptr) : "memory");
     return res;
 }
+#endif
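This hunk wraps the 8-byte atomic helpers in TARGET_IS_64BITS because the doubleword AMO instructions they use (amoadd.d and friends) only exist on RV64. For reference, the behaviour __atomic_fetch_sub_8 stands in for (return the old value, then store old minus val) looks like this with Rust's portable atomics; the snippet is illustrative and not taken from the source:

    // Illustration only: 8-byte fetch-and-subtract semantics, expressed with
    // AtomicU64 rather than the amoadd.d.rl shim shown above.
    use std::sync::atomic::{AtomicU64, Ordering};

    fn main() {
        let counter = AtomicU64::new(10);
        let old = counter.fetch_sub(3, Ordering::Release); // returns the previous value
        assert_eq!(old, 10);
        assert_eq!(counter.load(Ordering::Acquire), 7);
    }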

@@ -63,6 +63,18 @@ global_asm!("
 .macro XRET\n sret\n .endm
 ");
+#[cfg(target_pointer_width = "32")]
+global_asm!("
+.equ XLENB, 4
+.equ XLENb, 32
+");
+#[cfg(target_pointer_width = "64")]
+global_asm!("
+.equ XLENB, 8
+.equ XLENb, 64
+");
 #[cfg(feature = "board_k210")]
 global_asm!(include_str!("boot/boot_k210.asm"));
 global_asm!(include_str!("boot/entry.asm"));
