Merge remote-tracking branch 'rcore/dev' into dev

Branch: master
Author: Harry Chen (6 years ago)
Commit: 324b64c2ed

@@ -1,11 +1,14 @@
+use super::ipi::IPIEventItem;
 use alloc::boxed::Box;
+use alloc::vec::*;
+use core::sync::atomic::{AtomicBool, Ordering};
 use x86_64::registers::model_specific::Msr;
 use x86_64::structures::gdt::*;
 use x86_64::structures::tss::TaskStateSegment;
 use x86_64::{PrivilegeLevel, VirtAddr};
 use crate::consts::MAX_CPU_NUM;
+use crate::sync::{Semaphore, SpinLock as Mutex};

 /// Init TSS & GDT.
 pub fn init() {
@@ -20,22 +23,16 @@ static mut CPUS: [Option<Cpu>; MAX_CPU_NUM] = [
     None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
     None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
     None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
-    None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+    None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
+    None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
-    // None, None, None, None, None, None, None, None,
 ];

 pub struct Cpu {
     gdt: GlobalDescriptorTable,
     tss: TaskStateSegment,
     double_fault_stack: [u8; 0x100],
+    preemption_disabled: AtomicBool, // TODO: check this on timer(). This is currently unavailable since related code is in rcore_thread.
+    ipi_handler_queue: Mutex<Vec<IPIEventItem>>,
+    id: usize,
 }

 impl Cpu {
@@ -44,9 +41,45 @@ impl Cpu {
             gdt: GlobalDescriptorTable::new(),
             tss: TaskStateSegment::new(),
             double_fault_stack: [0u8; 0x100],
+            preemption_disabled: AtomicBool::new(false),
+            ipi_handler_queue: Mutex::new(vec![]),
+            id: 0,
         }
     }

+    pub fn iter() -> impl Iterator<Item = &'static Self> {
+        unsafe { CPUS.iter().filter_map(|x| x.as_ref()) }
+    }
+
+    pub fn id(&self) -> usize {
+        self.id
+    }
+
+    pub fn notify_event(&self, item: IPIEventItem) {
+        let mut queue = self.ipi_handler_queue.lock();
+        queue.push(item);
+    }
+
+    pub fn current() -> &'static mut Cpu {
+        unsafe { CPUS[super::cpu::id()].as_mut().unwrap() }
+    }
+
+    pub fn handle_ipi(&self) {
+        let mut queue = self.ipi_handler_queue.lock();
+        // Take the queued handlers and release the lock before running them,
+        // so a handler can queue further events without deadlocking.
+        let handlers = core::mem::replace(queue.as_mut(), vec![]);
+        drop(queue);
+        for handler in handlers {
+            handler();
+        }
+    }
+
+    pub fn disable_preemption(&self) -> bool {
+        self.preemption_disabled.swap(true, Ordering::Relaxed)
+    }
+
+    pub fn restore_preemption(&self, val: bool) {
+        self.preemption_disabled.store(val, Ordering::Relaxed);
+    }
+
+    pub fn can_preempt(&self) -> bool {
+        // Preemption is possible only when the flag is *not* set.
+        !self.preemption_disabled.load(Ordering::Relaxed)
+    }
+
     unsafe fn init(&'static mut self) {
         use x86_64::instructions::segmentation::{load_fs, set_cs};
         use x86_64::instructions::tables::load_tss;
@@ -63,7 +96,7 @@ impl Cpu {
         self.gdt.add_entry(UCODE);
         self.gdt.add_entry(Descriptor::tss_segment(&self.tss));
         self.gdt.load();
+        self.id = super::cpu::id();
         // reload code segment register
         set_cs(KCODE_SELECTOR);
         // load TSS
@@ -81,7 +114,7 @@
 const KCODE: Descriptor = Descriptor::UserSegment(0x0020980000000000); // EXECUTABLE | USER_SEGMENT | PRESENT | LONG_MODE
 const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE
 const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT
 const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
 // Copied from xv6
 const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT
 const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
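Usage note (editorial, not part of the commit): `disable_preemption` above returns the previous flag value via `swap`, which is what makes nesting safe. A minimal sketch of the intended pattern, assuming the `Cpu` API from this diff:

    // Hypothetical caller; `Cpu::current()` and the preemption methods are
    // the ones added above. Saving the old value lets critical sections
    // nest without re-enabling preemption too early.
    let was_disabled = Cpu::current().disable_preemption();
    // ... work that must not be preempted on this CPU ...
    Cpu::current().restore_preemption(was_disabled);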

@@ -45,3 +45,6 @@ pub const PIRQE: u8 = 20;
 pub const PIRQF: u8 = 21;
 pub const PIRQG: u8 = 22;
 pub const PIRQH: u8 = 23;
+
+// IPI constants
+pub const IPIFuncCall: u8 = 0xfc;

@@ -109,6 +109,11 @@ pub extern "C" fn rust_trap(tf: &mut TrapFrame) {
         Syscall32 => syscall32(tf),
         InvalidOpcode => invalid_opcode(tf),
         DivideError | GeneralProtectionFault => error(tf),
+        IPIFuncCall => {
+            let irq = tf.trap_num as u8 - IRQ0;
+            // must ack (send EOI) before running handlers: a queued handler
+            // may switch context and never return to this point
+            super::ack(irq);
+            super::super::gdt::Cpu::current().handle_ipi();
+        }
         _ => panic!("Unhandled interrupt {:x}", tf.trap_num),
     }
 }

@@ -0,0 +1,40 @@
+//! Interface for inter-processor interrupt.
+//! This module wraps inter-processor interrupt into a broadcast-calling style.
+
+use crate::consts::KERNEL_OFFSET;
+use alloc::boxed::{Box, FnBox};
+use alloc::sync::Arc;
+use apic::{LocalApic, XApic, LAPIC_ADDR};
+use core::sync::atomic::{spin_loop_hint, AtomicU8, Ordering};
+
+pub type IPIEventItem = Box<FnBox()>;
+
+unsafe fn get_apic() -> XApic {
+    XApic::new(KERNEL_OFFSET + LAPIC_ADDR)
+}
+
+pub fn invoke_on_allcpu(f: impl Fn() + 'static, wait: bool) {
+    // Step 1: initialize
+    use super::interrupt::consts::IPIFuncCall;
+    let mut apic = unsafe { get_apic() };
+    let func = Arc::new(f);
+    let cpu_count = super::gdt::Cpu::iter().count();
+    let rest_count = Arc::new(AtomicU8::new(cpu_count as u8));
+    // Step 2: invoke
+    for cpu in super::gdt::Cpu::iter() {
+        let func_clone = func.clone();
+        let rest_clone = rest_count.clone();
+        cpu.notify_event(Box::new(move || {
+            func_clone();
+            rest_clone.fetch_sub(1, Ordering::Relaxed);
+        }));
+        apic.send_ipi(cpu.id() as u8, IPIFuncCall);
+    }
+    if wait {
+        // spin until every remote invocation has completed
+        while rest_count.load(Ordering::Relaxed) != 0 {
+            spin_loop_hint();
+        }
+    }
+}
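For orientation, an illustrative sketch (not part of the commit) of how a caller uses the broadcast style above. `invoke_on_allcpu` and its signature are taken from the diff; the counter, function name, and `super::ipi` path are hypothetical:

    // Hypothetical example: count how many CPUs ran the closure, assuming
    // this module is reachable as `super::ipi` from sibling arch code.
    use core::sync::atomic::{AtomicUsize, Ordering};

    static VISITED: AtomicUsize = AtomicUsize::new(0);

    fn ping_all_cpus() {
        super::ipi::invoke_on_allcpu(
            || {
                VISITED.fetch_add(1, Ordering::Relaxed);
            },
            true, // spin until every CPU has run the closure
        );
    }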

@@ -14,6 +14,7 @@ pub mod paging;
 pub mod rand;
 pub mod syscall;
 pub mod timer;
+pub mod ipi;

 static AP_CAN_INIT: AtomicBool = AtomicBool::new(false);
@@ -51,9 +52,9 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
     crate::drivers::init();
     // init cpu scheduler and process manager, and add user shell app in process manager
     crate::process::init();
-    //wake up other CPUs
+    // wake up other CPUs
     AP_CAN_INIT.store(true, Ordering::Relaxed);
-    //call the first main function in kernel.
+    // call the first main function in kernel.
     crate::kmain();
 }

@@ -1,6 +1,7 @@
 // Depends on kernel
 use crate::consts::KERNEL_OFFSET;
 use crate::memory::{active_table, alloc_frame, dealloc_frame};
+use core::sync::atomic::Ordering;
 use log::*;
 use rcore_memory::paging::*;
 use x86_64::instructions::tlb;
@@ -12,7 +13,7 @@ use x86_64::structures::paging::{
     page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
     FrameAllocator, FrameDeallocator,
 };
-use x86_64::PhysAddr;
+use x86_64::{VirtAddr, PhysAddr};

 pub trait PageExt {
     fn of_addr(address: usize) -> Self;
@@ -57,11 +58,13 @@ impl PageTable for ActivePageTable {
             .unwrap()
             .flush();
         }
+        flush_tlb_all(addr);
         unsafe { &mut *(get_entry_ptr(addr, 1)) }
     }

     fn unmap(&mut self, addr: usize) {
         self.0.unmap(Page::of_addr(addr)).unwrap().1.flush();
+        flush_tlb_all(addr);
     }

     fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
@@ -93,6 +96,7 @@ impl Entry for PageEntry {
         use x86_64::{instructions::tlb::flush, VirtAddr};
         let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
         flush(addr);
+        flush_tlb_all(addr.as_u64() as usize);
     }

     fn accessed(&self) -> bool {
         self.0.flags().contains(EF::ACCESSED)
@@ -277,3 +281,14 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
         dealloc_frame(frame.start_address().as_u64() as usize);
     }
 }
+
+/// Flush the TLB entry for `vaddr` on all CPUs
+fn flush_tlb_all(vaddr: usize) {
+    // Before the APs are brought up, only the boot CPU runs; skip the IPI.
+    if !super::AP_CAN_INIT.load(Ordering::Relaxed) {
+        return;
+    }
+    super::ipi::invoke_on_allcpu(move || tlb::flush(VirtAddr::new(vaddr as u64)), false);
+}
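Design note (editorial, not from the commit): `flush_tlb_all` passes `wait: false`, so an unmap returns before remote CPUs have flushed; the local flush already happened via `flush()`. A stricter, hypothetical synchronous variant would wait, at the risk of spinning forever if the waiting CPU has interrupts disabled (its own IPI could then never be delivered):

    // Hypothetical synchronous shootdown, assuming the same
    // `invoke_on_allcpu` signature as above. Only safe to call with
    // interrupts enabled on the current CPU.
    fn flush_tlb_all_sync(vaddr: usize) {
        super::ipi::invoke_on_allcpu(
            move || tlb::flush(VirtAddr::new(vaddr as u64)),
            true, // block until every CPU has flushed the entry
        );
    }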

@@ -6,6 +6,7 @@
 #![feature(optin_builtin_traits)]
 #![feature(panic_info_message)]
 #![feature(global_asm)]
+#![feature(fnbox)]
 #![deny(unused_must_use)]
 #![no_std]

@@ -18,8 +18,13 @@ pub fn add_user_shell() {
     // let init_shell="/bin/busybox"; // from alpine linux
     //
     // #[cfg(not(target_arch = "x86_64"))]
+    #[cfg(not(feature = "board_rocket_chip"))]
     let init_shell = "/busybox"; // from docker-library
+    // fd is not available on rocket chip
+    #[cfg(feature = "board_rocket_chip")]
+    let init_shell = "/rust/sh";

     #[cfg(target_arch = "x86_64")]
     let init_envs =
         vec!["PATH=/usr/sbin:/usr/bin:/sbin:/bin:/usr/x86_64-alpine-linux-musl/bin".into()];

@@ -132,6 +132,14 @@ impl Syscall<'_> {
             SYS_DUP3 => self.sys_dup2(args[0], args[1]), // TODO: handle `flags`
             SYS_PIPE2 => self.sys_pipe(args[0] as *mut u32), // TODO: handle `flags`
             SYS_UTIMENSAT => self.unimplemented("utimensat", Ok(0)),
+            SYS_COPY_FILE_RANGE => self.sys_copy_file_range(
+                args[0],
+                args[1] as *mut usize,
+                args[2],
+                args[3] as *mut usize,
+                args[4],
+                args[5],
+            ),

             // io multiplexing
             SYS_PPOLL => {
@@ -231,6 +239,7 @@ impl Syscall<'_> {
                 args[2] as i32,
                 args[3] as *const TimeSpec,
             ),
+            SYS_TKILL => self.unimplemented("tkill", Ok(0)),

             // time
             SYS_NANOSLEEP => self.sys_nanosleep(args[0] as *const TimeSpec),
@@ -276,25 +285,15 @@ impl Syscall<'_> {
                 args[2] as u32,
                 args[3] as *const u8,
             ),
-            SYS_COPY_FILE_RANGE => self.sys_copy_file_range(
-                args[0],
-                args[1] as *mut usize,
-                args[2],
-                args[3] as *mut usize,
-                args[4],
-                args[5],
-            ),
+            SYS_GETRANDOM => {
+                self.sys_getrandom(args[0] as *mut u8, args[1] as usize, args[2] as u32)
+            }

             // custom
             SYS_MAP_PCI_DEVICE => self.sys_map_pci_device(args[0], args[1]),
             SYS_GET_PADDR => {
                 self.sys_get_paddr(args[0] as *const u64, args[1] as *mut u64, args[2])
             }
-            //SYS_GETRANDOM => self.unimplemented("getrandom", Err(SysError::EINVAL)),
-            SYS_GETRANDOM => {
-                self.sys_getrandom(args[0] as *mut u8, args[1] as usize, args[2] as u32)
-            }
-            SYS_TKILL => self.unimplemented("tkill", Ok(0)),
             _ => {
                 let ret = match () {
                     #[cfg(target_arch = "x86_64")]

@@ -1 +1 @@
-Subproject commit bb73d6ecce1ab0e6fae692c51e4335772b0335d4
+Subproject commit 9fb1d459b50bc14c7ac56d9fd94b4b8485620730