merge interrupt & context switch for rv32 & rv64

toolchain_update
WangRunji 6 years ago
parent de24f6673c
commit b3a8e95d78

@@ -15,50 +15,26 @@ pub unsafe fn restore(flags: usize) {
 }
 #[inline(always)]
-#[cfg(target_arch = "riscv32")]
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
 pub unsafe fn disable_and_store() -> usize {
     if option_env!("m_mode").is_some() {
         let mstatus: usize;
-        asm!("csrrci $0, 0x300, 1 << 3" : "=r"(mstatus));
+        asm!("csrci mstatus, 1 << 3" : "=r"(mstatus));
         mstatus & (1 << 3)
     } else {
         let sstatus: usize;
-        asm!("csrrci $0, 0x100, 1 << 1" : "=r"(sstatus));
+        asm!("csrci sstatus, 1 << 1" : "=r"(sstatus));
         sstatus & (1 << 1)
     }
 }
 #[inline(always)]
-#[cfg(target_arch = "riscv64")]
-pub unsafe fn disable_and_store() -> usize {
-    if option_env!("m_mode").is_some() {
-        let mstatus: usize;
-        asm!("csrrci $0, 0x300, 1 << 3" : "=r"(mstatus));
-        mstatus & (1 << 3)
-    } else {
-        let sstatus: usize;
-        asm!("csrrci $0, 0x100, 1 << 1" : "=r"(sstatus));
-        sstatus & (1 << 1)
-    }
-}
-#[inline(always)]
-#[cfg(target_arch = "riscv32")]
-pub unsafe fn restore(flags: usize) {
-    if option_env!("m_mode").is_some() {
-        asm!("csrs 0x300, $0" :: "r"(flags));
-    } else {
-        asm!("csrs 0x100, $0" :: "r"(flags));
-    }
-}
-#[inline(always)]
-#[cfg(target_arch = "riscv64")]
+#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
 pub unsafe fn restore(flags: usize) {
     if option_env!("m_mode").is_some() {
-        asm!("csrs 0x300, $0" :: "r"(flags));
+        asm!("csrs mstatus, $0" :: "r"(flags));
     } else {
-        asm!("csrs 0x100, $0" :: "r"(flags));
+        asm!("csrs sstatus, $0" :: "r"(flags));
     }
 }
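The merged `disable_and_store` / `restore` pair brackets a critical section: the first clears sstatus.SIE (or mstatus.MIE when built with the `m_mode` env flag) and returns the bit that was previously set, the second ORs that bit back with `csrs`, so nested sections compose. A minimal caller sketch; the `no_interrupt` wrapper is illustrative and not part of this commit:

// Hypothetical helper built on the two functions above (not in this commit).
pub unsafe fn no_interrupt<T>(f: impl FnOnce() -> T) -> T {
    let flags = disable_and_store(); // clear SIE/MIE, remember whether it was set
    let ret = f();                   // critical section runs with interrupts off
    restore(flags);                  // re-enable only if it was enabled before
    ret
}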

@@ -148,105 +148,70 @@ pub struct Context(usize);
 impl Context {
     /// Switch to another kernel thread.
     ///
-    /// Defined in `trap.asm`.
-    ///
     /// Push all callee-saved registers at the current kernel stack.
     /// Store current sp, switch to target.
     /// Pop all callee-saved registers, then return to the target.
-    #[cfg(target_arch = "riscv32")]
     #[naked]
     #[inline(never)]
     pub unsafe extern fn switch(&mut self, target: &mut Self) {
-        asm!(
-        "
-        // save from's registers
-        addi sp, sp, -4*14
-        sw sp, 0(a0)
-        sw ra, 0*4(sp)
-        sw s0, 2*4(sp)
-        sw s1, 3*4(sp)
-        sw s2, 4*4(sp)
-        sw s3, 5*4(sp)
-        sw s4, 6*4(sp)
-        sw s5, 7*4(sp)
-        sw s6, 8*4(sp)
-        sw s7, 9*4(sp)
-        sw s8, 10*4(sp)
-        sw s9, 11*4(sp)
-        sw s10, 12*4(sp)
-        sw s11, 13*4(sp)
-        csrrs s11, 0x180, x0 // satp
-        sw s11, 1*4(sp)
-        // restore to's registers
-        lw sp, 0(a1)
-        lw s11, 1*4(sp)
-        csrrw x0, 0x180, s11 // satp
-        lw ra, 0*4(sp)
-        lw s0, 2*4(sp)
-        lw s1, 3*4(sp)
-        lw s2, 4*4(sp)
-        lw s3, 5*4(sp)
-        lw s4, 6*4(sp)
-        lw s5, 7*4(sp)
-        lw s6, 8*4(sp)
-        lw s7, 9*4(sp)
-        lw s8, 10*4(sp)
-        lw s9, 11*4(sp)
-        lw s10, 12*4(sp)
-        lw s11, 13*4(sp)
-        addi sp, sp, 4*14
-        sw zero, 0(a1)
-        ret"
-        : : : : "volatile" )
-    }
+        #[cfg(target_arch = "riscv32")]
+        asm!(r"
+        .equ XLENB, 4
+        .macro Load reg, mem
+            lw \reg, \mem
+        .endm
+        .macro Store reg, mem
+            sw \reg, \mem
+        .endm");
         #[cfg(target_arch = "riscv64")]
-    #[naked]
-    #[inline(never)]
-    pub unsafe extern fn switch(&mut self, target: &mut Self) {
-        asm!(
-        "
+        asm!(r"
+        .equ XLENB, 8
+        .macro Load reg, mem
+            ld \reg, \mem
+        .endm
+        .macro Store reg, mem
+            sd \reg, \mem
+        .endm");
+        asm!("
         // save from's registers
-        addi sp, sp, -8*14
-        sd sp, 0(a0)
-        sd ra, 0*8(sp)
-        sd s0, 2*8(sp)
-        sd s1, 3*8(sp)
-        sd s2, 4*8(sp)
-        sd s3, 5*8(sp)
-        sd s4, 6*8(sp)
-        sd s5, 7*8(sp)
-        sd s6, 8*8(sp)
-        sd s7, 9*8(sp)
-        sd s8, 10*8(sp)
-        sd s9, 11*8(sp)
-        sd s10, 12*8(sp)
-        sd s11, 13*8(sp)
-        csrrs s11, 0x180, x0 // satp
-        sd s11, 1*8(sp)
+        addi sp, sp, (-XLENB*14)
+        Store sp, 0(a0)
+        Store ra, 0*XLENB(sp)
+        Store s0, 2*XLENB(sp)
+        Store s1, 3*XLENB(sp)
+        Store s2, 4*XLENB(sp)
+        Store s3, 5*XLENB(sp)
+        Store s4, 6*XLENB(sp)
+        Store s5, 7*XLENB(sp)
+        Store s6, 8*XLENB(sp)
+        Store s7, 9*XLENB(sp)
+        Store s8, 10*XLENB(sp)
+        Store s9, 11*XLENB(sp)
+        Store s10, 12*XLENB(sp)
+        Store s11, 13*XLENB(sp)
+        csrr s11, satp
+        Store s11, 1*XLENB(sp)
         // restore to's registers
-        ld sp, 0(a1)
-        ld s11, 1*8(sp)
-        csrrw x0, 0x180, s11 // satp
-        ld ra, 0*8(sp)
-        ld s0, 2*8(sp)
-        ld s1, 3*8(sp)
-        ld s2, 4*8(sp)
-        ld s3, 5*8(sp)
-        ld s4, 6*8(sp)
-        ld s5, 7*8(sp)
-        ld s6, 8*8(sp)
-        ld s7, 9*8(sp)
-        ld s8, 10*8(sp)
-        ld s9, 11*8(sp)
-        ld s10, 12*8(sp)
-        ld s11, 13*8(sp)
-        addi sp, sp, 8*14
-        sd zero, 0(a1)
+        Load sp, 0(a1)
+        Load s11, 1*XLENB(sp)
+        csrw satp, s11
+        Load ra, 0*XLENB(sp)
+        Load s0, 2*XLENB(sp)
+        Load s1, 3*XLENB(sp)
+        Load s2, 4*XLENB(sp)
+        Load s3, 5*XLENB(sp)
+        Load s4, 6*XLENB(sp)
+        Load s5, 7*XLENB(sp)
+        Load s6, 8*XLENB(sp)
+        Load s7, 9*XLENB(sp)
+        Load s8, 10*XLENB(sp)
+        Load s9, 11*XLENB(sp)
+        Load s10, 12*XLENB(sp)
+        Load s11, 13*XLENB(sp)
+        addi sp, sp, (XLENB*14)
+        Store zero, 0(a1)
         ret"
         : : : : "volatile" )
     }
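Both widths now push the same 14-slot frame and only the slot size (XLENB) differs: ra at slot 0, satp at slot 1, s0–s11 at slots 2–13, with the old stack pointer stored through a0 (`&mut self.0`) and the new one loaded through a1 (`target.0`, zeroed afterwards). A rough picture of that frame; the `ContextFrame` name exists only for illustration:

// Illustration of the layout Context::switch pushes on the kernel stack.
// One slot is 4 bytes on rv32 and 8 bytes on rv64 (XLENB).
#[repr(C)]
struct ContextFrame {
    ra: usize,      // 0 * XLENB(sp): address execution resumes at after `ret`
    satp: usize,    // 1 * XLENB(sp): page-table root restored with `csrw satp`
    s: [usize; 12], // 2..=13 * XLENB(sp): callee-saved registers s0..s11
}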

@@ -74,29 +74,28 @@ global_asm!("
 .macro TEST_BACK_TO_KERNEL
     andi s0, s1, 1 << 8     // sstatus.SPP = 1
 .endm
 ");
-#[cfg(target_pointer_width = "32")]
-global_asm!("
+#[cfg(target_arch = "riscv32")]
+global_asm!(r"
 .equ XLENB, 4
 .equ XLENb, 32
 .macro LOAD a1, a2
-    lw \\a1, \\a2*XLENB(sp)
+    lw \a1, \a2*XLENB(sp)
 .endm
 .macro STORE a1, a2
-    sw \\a1, \\a2*XLENB(sp)
+    sw \a1, \a2*XLENB(sp)
 .endm
 ");
-#[cfg(target_pointer_width = "64")]
-global_asm!("
+#[cfg(target_arch = "riscv64")]
+global_asm!(r"
 .equ XLENB, 8
 .equ XLENb, 64
 .macro LOAD a1, a2
-    ld \\a1, \\a2*XLENB(sp)
+    ld \a1, \a2*XLENB(sp)
 .endm
 .macro STORE a1, a2
-    sd \\a1, \\a2*XLENB(sp)
+    sd \a1, \a2*XLENB(sp)
 .endm
 ");
