x86_64: simplify IPI

Branch: master
WangRunji, 6 years ago
parent 2adbc4dcd9
commit 75919dce41

@@ -47,30 +47,28 @@ impl Cpu {
         }
     }

-    pub fn foreach(mut f: impl FnMut(&mut Cpu)) {
+    pub fn iter() -> impl Iterator<Item = &'static Self> {
         unsafe {
-            CPUS.iter_mut()
-                .filter_map(|x| x.as_mut())
-                .for_each(f);
+            CPUS.iter()
+                .filter_map(|x| x.as_ref())
         }
     }
-    pub fn get_id(&self) -> usize {
+    pub fn id(&self) -> usize {
         self.id
     }
-    pub fn notify_event(&mut self, item: IPIEventItem) {
+    pub fn notify_event(&self, item: IPIEventItem) {
         let mut queue = self.ipi_handler_queue.lock();
         queue.push(item);
     }
     pub fn current() -> &'static mut Cpu {
         unsafe { CPUS[super::cpu::id()].as_mut().unwrap() }
     }
-    pub fn ipi_handler(&mut self) {
+    pub fn handle_ipi(&self) {
         let mut queue = self.ipi_handler_queue.lock();
-        let mut current_events: Vec<IPIEventItem> = vec![];
-        ::core::mem::swap(&mut current_events, queue.as_mut());
+        let handlers = core::mem::replace(queue.as_mut(), vec![]);
         drop(queue);
-        for ev in current_events.iter() {
-            ev.call();
+        for handler in handlers {
+            handler();
         }
     }
     pub fn disable_preemption(&self) -> bool {
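The reworked `handle_ipi` drains the queue with `mem::replace` and drops the lock before invoking anything, so a handler that enqueues a follow-up event cannot deadlock on the spinlock it would otherwise still hold. A minimal sketch of that drain-then-run pattern, with `std::sync::Mutex` standing in for the kernel's `SpinLock` (the `EventQueue` type here is hypothetical):

```rust
use std::sync::Mutex;

type Event = Box<dyn FnOnce()>;

struct EventQueue {
    events: Mutex<Vec<Event>>,
}

impl EventQueue {
    fn handle(&self) {
        let mut guard = self.events.lock().unwrap();
        // Take the whole backlog in O(1), leaving an empty Vec behind.
        let pending = std::mem::replace(&mut *guard, Vec::new());
        // Release the lock before running handlers, so a handler that
        // pushes a new event does not deadlock on the queue lock.
        drop(guard);
        for ev in pending {
            ev(); // each boxed FnOnce is consumed by value
        }
    }
}
```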

@@ -112,7 +112,7 @@ pub extern "C" fn rust_trap(tf: &mut TrapFrame) {
         IPIFuncCall => {
             let irq = tf.trap_num as u8 - IRQ0;
             super::ack(irq); // must ack before switching
-            super::super::gdt::Cpu::current().ipi_handler();
+            super::super::gdt::Cpu::current().handle_ipi();
         }
         _ => panic!("Unhandled interrupt {:x}", tf.trap_num),
     }

@@ -1,82 +1,40 @@
-// Interface for inter-processor interrupt.
-// This module wraps inter-processor interrupt into a broadcast-calling style.
+/// Interface for inter-processor interrupt.
+/// This module wraps inter-processor interrupt into a broadcast-calling style.
 use crate::consts::KERNEL_OFFSET;
-use crate::sync::{Semaphore, SpinLock as Mutex};
-use alloc::boxed::Box;
+use alloc::boxed::{Box, FnBox};
 use alloc::sync::Arc;
 use apic::{LocalApic, XApic, LAPIC_ADDR};
-use lazy_static::*;
-use rcore_memory::Page;
-use x86_64::instructions::tlb;
-use x86_64::VirtAddr;
+use core::sync::atomic::{spin_loop_hint, AtomicU8, Ordering};

-struct IPIInvoke<'a, A>(&'a (Fn(&A) -> ()), &'a A);
+pub type IPIEventItem = Box<FnBox()>;

-lazy_static! {
-    static ref IPI_INVOKE_LOCK: Mutex<()> = Mutex::new(());
-}
-
-pub trait InvokeEventHandle {
-    fn call(&self);
-}
-
-struct InvokeEvent<A: 'static> {
-    function: fn(&A) -> (),
-    argument: Arc<A>,
-    done_semaphore: Arc<Semaphore>,
-}
-
-impl<A> InvokeEventHandle for InvokeEvent<A> {
-    fn call(&self) {
-        let arg_ref = self.argument.as_ref();
-        (self.function)(arg_ref);
-        self.done_semaphore.release();
-    }
-}
-
-pub type IPIEventItem = Box<InvokeEventHandle>;
-
-// TODO: something fishy is going on here...
-// In fact, the argument lives as long as the Arc.
-fn create_item<A: 'static>(f: fn(&A) -> (), arg: &Arc<A>, sem: &Arc<Semaphore>) -> IPIEventItem {
-    Box::new(InvokeEvent {
-        function: f,
-        argument: arg.clone(),
-        done_semaphore: sem.clone(),
-    })
-}

 unsafe fn get_apic() -> XApic {
     let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) };
     lapic
 }

-pub fn invoke_on_allcpu<A: 'static>(f: fn(&A) -> (), arg: A, wait: bool) {
+pub fn invoke_on_allcpu(f: impl Fn() + 'static, wait: bool) {
     // Step 1: initialize
     use super::interrupt::consts::IPIFuncCall;
     let mut apic = unsafe { get_apic() };
-    let sem = Arc::new(Semaphore::new(0));
-    let arcarg = Arc::new(arg);
-    let mut cpu_count = 0;
+    let func = Arc::new(f);
+    let cpu_count = super::gdt::Cpu::iter().count();
+    let rest_count = Arc::new(AtomicU8::new(cpu_count as u8));
     // Step 2: invoke
-    super::gdt::Cpu::foreach(|cpu| {
-        let id = cpu.get_id();
-        cpu_count += 1;
-        cpu.notify_event(create_item(f, &arcarg, &sem));
-        apic.send_ipi(id as u8, IPIFuncCall);
-    });
+    for cpu in super::gdt::Cpu::iter() {
+        let func_clone = func.clone();
+        let rest_clone = rest_count.clone();
+        cpu.notify_event(Box::new(move || {
+            func_clone();
+            rest_clone.fetch_sub(1, Ordering::Relaxed);
+        }));
+        apic.send_ipi(cpu.id() as u8, IPIFuncCall);
+    }
     if wait {
-        for _ in 0..cpu_count {
-            sem.acquire();
+        // spin until every remote invocation has completed
+        while rest_count.load(Ordering::Relaxed) != 0 {
+            spin_loop_hint();
         }
     }
 }
-
-// Examples of such cases.
-pub fn tlb_shootdown(tuple: &(usize, usize)) {
-    // debug!("CPU {}: remote tlb flush {:x?}", super::cpu::id(), tuple);
-    let (start_addr, end_addr) = *tuple;
-    for p in Page::range_of(start_addr, end_addr) {
-        tlb::flush(VirtAddr::new(p.start_address() as u64));
-    }
-}
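The new `invoke_on_allcpu` accepts any `Fn() + 'static` closure and replaces the semaphore with an `AtomicU8` completion counter. A self-contained model of the broadcast-and-wait scheme, with OS threads standing in for remote CPUs (the threads and the fixed count of 4 are illustrative assumptions; in the kernel the closure is queued via `notify_event` and kicked off by the IPI):

```rust
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Arc;
use std::thread;

fn invoke_on_all(f: impl Fn() + Send + Sync + 'static, wait: bool) {
    let func = Arc::new(f);
    let cpu_count: u8 = 4; // stand-in for Cpu::iter().count()
    let rest_count = Arc::new(AtomicU8::new(cpu_count));
    for _ in 0..cpu_count {
        let func = func.clone();
        let rest = rest_count.clone();
        // A thread plays the role of a remote CPU handling the IPI.
        thread::spawn(move || {
            func();
            rest.fetch_sub(1, Ordering::Relaxed); // mirrors the Relaxed counter in the diff
        });
    }
    if wait {
        // Spin until every "CPU" has run the closure.
        while rest_count.load(Ordering::Relaxed) != 0 {
            std::hint::spin_loop();
        }
    }
}

fn main() {
    invoke_on_all(|| println!("hello from a simulated CPU"), true);
}
```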

@@ -1,6 +1,7 @@
 // Depends on kernel
 use crate::consts::KERNEL_OFFSET;
 use crate::memory::{active_table, alloc_frame, dealloc_frame};
+use core::sync::atomic::Ordering;
 use log::*;
 use rcore_memory::paging::*;
 use x86_64::instructions::tlb;
@@ -12,8 +13,7 @@ use x86_64::structures::paging::{
     page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
     FrameAllocator, FrameDeallocator,
 };
-use x86_64::PhysAddr;
-use core::sync::atomic::Ordering;
+use x86_64::{VirtAddr, PhysAddr};

 pub trait PageExt {
     fn of_addr(address: usize) -> Self;
@@ -287,5 +287,8 @@ fn flush_tlb_all(vaddr: usize) {
     if !super::AP_CAN_INIT.load(Ordering::Relaxed) {
         return;
     }
-    super::ipi::invoke_on_allcpu(super::ipi::tlb_shootdown, (vaddr, vaddr + 0x1000), false);
+    super::ipi::invoke_on_allcpu(
+        move || tlb::flush(VirtAddr::new(vaddr as u64)),
+        false,
+    );
 }
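With the generic closure API, the single-page shootdown is inlined at the call site and the removed `tlb_shootdown` range helper is no longer needed. If a range flush were ever wanted again, a sketch using the same `x86_64` crate calls (4 KiB pages assumed; `flush_range` is a hypothetical name):

```rust
use x86_64::instructions::tlb;
use x86_64::VirtAddr;

/// Flush every 4 KiB page in [start, end), as the removed helper did.
fn flush_range(start: usize, end: usize) {
    let mut addr = start & !0xfff; // round down to a page boundary
    while addr < end {
        tlb::flush(VirtAddr::new(addr as u64));
        addr += 0x1000;
    }
}
```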

@@ -6,6 +6,7 @@
 #![feature(optin_builtin_traits)]
 #![feature(panic_info_message)]
 #![feature(global_asm)]
+#![feature(fnbox)]
 #![deny(unused_must_use)]
 #![no_std]
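The `fnbox` gate is what lets `Box<FnBox()>` be called by value on the nightly compilers of that era. `FnBox` was later removed; since Rust 1.35 a `Box<dyn FnOnce()>` can be invoked directly, so a modern port could drop both the gate and the import:

```rust
// On Rust >= 1.35 no feature gate is needed:
fn run(ev: Box<dyn FnOnce()>) {
    ev(); // consumes the box; FnBox is obsolete
}
```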
