Now CPU1 can handle interrupts. Alloc TSS & GDT & IDT at kernel heap.

master · WangRunji, 7 years ago
parent 3e9ee46850 · commit f6df3c412c

@@ -1,3 +1,8 @@
pub fn init() {
enable_nxe_bit();
enable_write_protect_bit();
}
/// Enable 'No-Execute' bit in page entry
pub fn enable_nxe_bit() {
use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
@@ -26,7 +31,7 @@ pub fn enable_write_protect_bit() {
/// The error code is `value written to 0x501` * 2 + 1, so it should be odd
pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
use x86_64::instructions::port::outb;
assert!(error_code & 1 == 1);
assert_eq!(error_code & 1, 1, "error code should be odd");
outb(0x501, (error_code - 1) / 2);
unreachable!()
}
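
For context on the two flags `cpu::init()` sets: `enable_nxe_bit` turns on EFER.NXE (bit 11) so page-table entries may carry the no-execute bit, and `enable_write_protect_bit` sets CR0.WP so ring 0 faults on writes to read-only pages. A sketch of the bodies this diff truncates, assuming the blog_os-era `x86_64` crate API that the visible imports match:

```rust
/// Enable 'No-Execute' bit in page entry (EFER.NXE, bit 11).
pub fn enable_nxe_bit() {
    use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
    let nxe_bit = 1 << 11;
    unsafe {
        let efer = rdmsr(IA32_EFER);
        wrmsr(IA32_EFER, efer | nxe_bit);
    }
}

/// Enable write protection in ring 0 (CR0.WP),
/// so the kernel faults on writes to read-only pages.
pub fn enable_write_protect_bit() {
    use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
    unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
}
```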

@@ -4,25 +4,33 @@ use x86_64::structures::tss::TaskStateSegment;
use x86_64::structures::gdt::SegmentSelector;
use x86_64::{PrivilegeLevel, VirtualAddress};
use spin::Once;
use alloc::boxed::Box;
static TSS: Once<TaskStateSegment> = Once::new();
static GDT: Once<Gdt> = Once::new();
pub fn init(double_fault_stack_top: usize) {
/// Alloc TSS & GDT at kernel heap, then init and load it.
/// The double fault stack will be allocated at kernel heap too.
pub fn init() {
use x86_64::structures::gdt::SegmentSelector;
use x86_64::instructions::segmentation::set_cs;
use x86_64::instructions::tables::load_tss;
let tss = TSS.call_once(|| {
struct DoubleFaultStack {
space: [u8; 4096]
}
let double_fault_stack_top = Box::into_raw(Box::new(
DoubleFaultStack{space: [0; 4096]}
)) as usize + 4096;
let mut tss = Box::new({
let mut tss = TaskStateSegment::new();
tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX]
= VirtualAddress(double_fault_stack_top);
tss
});
let tss = unsafe{ &*Box::into_raw(tss) };
let mut code_selector = SegmentSelector(0);
let mut tss_selector = SegmentSelector(0);
let gdt = GDT.call_once(|| {
let gdt = Box::new({
let mut gdt = Gdt::new();
gdt.add_entry(GNULL);
code_selector =
@@ -33,6 +41,7 @@ pub fn init(double_fault_stack_top: usize) {
tss_selector = gdt.add_entry(Descriptor::tss_segment(&tss));
gdt
});
let gdt = unsafe{ &*Box::into_raw(gdt) };
gdt.load();
unsafe {
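
The pattern above, build the table in a `Box` and then leak it with `Box::into_raw`, trades the old `Once` statics for one heap copy per CPU: `lgdt`/`ltr` make the hardware keep raw pointers into the table, so it must live (and stay put) forever, and each core that calls `init()` now gets its own TSS with its own IST stacks. Note the heap-allocated double fault stack has no guard page, unlike a `MemoryController` stack. A generic sketch of the leak idiom, with a hypothetical `Table` type:

```rust
use alloc::boxed::Box;

struct Table([u64; 8]); // hypothetical table layout

/// Heap-allocate `t` and deliberately leak the Box: the returned
/// reference is valid for the rest of the kernel's lifetime, which
/// is exactly what lgdt/ltr/lidt require of the memory they point at.
fn leak_static(t: Table) -> &'static Table {
    unsafe { &*Box::into_raw(Box::new(t)) }
}
```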

@@ -1,10 +1,10 @@
use x86_64::structures::idt::Idt;
use spin::Once;
use alloc::boxed::Box;
static IDT: Once<Idt> = Once::new();
/// Alloc IDT at kernel heap, then init and load it.
pub fn init() {
let idt = IDT.call_once(|| {
let idt = Box::new({
use arch::interrupt::irq::*;
use consts::irq::*;
use arch::gdt::DOUBLE_FAULT_IST_INDEX;
@@ -22,6 +22,7 @@ pub fn init() {
}
idt
});
let idt = unsafe{ &*Box::into_raw(idt) };
idt.load();
}
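
The elided middle of this hunk registers the handlers; the important one for this commit is the double fault entry, which must run on the dedicated IST stack set up in `gdt::init()`. A hedged sketch of that registration, assuming the first-edition blog_os flavor of the `x86_64` crate API (handler body is illustrative):

```rust
use x86_64::structures::idt::{Idt, ExceptionStackFrame};
use arch::gdt::DOUBLE_FAULT_IST_INDEX;

extern "x86-interrupt" fn double_fault(
    stack_frame: &mut ExceptionStackFrame, _error_code: u64)
{
    panic!("\nEXCEPTION: DOUBLE FAULT\n{:#?}", stack_frame);
}

fn register(idt: &mut Idt) {
    unsafe {
        // Run the handler on the dedicated IST stack so a kernel
        // stack overflow cannot escalate into a triple fault.
        idt.double_fault.set_handler_fn(double_fault)
            .set_stack_index(DOUBLE_FAULT_IST_INDEX as u16);
    }
}
```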

@@ -5,8 +5,3 @@ pub mod paging;
pub mod gdt;
pub mod idt;
pub mod smp;
pub fn init() {
cpu::enable_nxe_bit();
cpu::enable_write_protect_bit();
}

@@ -20,12 +20,11 @@ pub fn start_other_cores(acpi: &ACPI_Result, mc: &mut MemoryController) {
for i in 1 .. acpi.cpu_num {
let apic_id = acpi.cpu_acpi_ids[i as usize];
*args = EntryArgs {
kstack: mc.alloc_stack(1).unwrap().top() as u64,
kstack: mc.alloc_stack(7).unwrap().top() as u64,
page_table: page_table,
stack: 0x8000, // just enough stack to get us to entry64mp
};
start_ap(apic_id, ENTRYOTHER_ADDR);
loop{}
}
}
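
For context, `EntryArgs` is the handoff block the BSP fills in for each AP before sending the startup IPI; `entryother` reads it from a fixed location near the trampoline. The bump from `alloc_stack(1)` to `alloc_stack(7)` presumably gives the AP enough kernel stack to survive the full `gdt`/`idt` init path it now runs. A plausible definition consistent with the assignments above (field widths other than `kstack: u64` are assumptions and must match the assembly side):

```rust
/// Per-AP boot arguments; #[repr(C)] so the layout matches what the
/// entryother assembly expects. Only `kstack: u64` is confirmed by the
/// `as u64` cast above; the other field widths are guesses.
#[repr(C)]
struct EntryArgs {
    kstack: u64,     // top of the AP's 7-page kernel stack
    page_table: u32, // physical address of the kernel page table
    stack: u32,      // temporary stack (0x8000), enough to reach entry64mp
}
```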
@@ -35,6 +34,7 @@ fn copy_entryother() {
let entryother_start = entryother_start as usize;
let entryother_end = entryother_end as usize;
let size = entryother_end - entryother_start;
assert!(size <= 0x1000, "entryother code is too large, not supported.");
unsafe{ memmove(ENTRYOTHER_ADDR as *mut u8, entryother_start as *mut u8, size); }
debug!("smp: copied entryother code to 0x7000");
}
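
`copy_entryother` stages the AP boot trampoline at the fixed physical page 0x7000; the new assert enforces the one-page budget. A self-contained sketch of the helper, assuming `entryother_start`/`entryother_end` are linker symbols bracketing the blob:

```rust
extern "C" {
    fn entryother_start(); // linker symbols bracketing the trampoline (assumed)
    fn entryother_end();
}

const ENTRYOTHER_ADDR: usize = 0x7000;

unsafe fn copy_entryother() {
    let start = entryother_start as usize;
    let size = entryother_end as usize - start;
    // The real-mode trampoline must fit in the single page at 0x7000.
    assert!(size <= 0x1000, "entryother code is too large, not supported.");
    // core::ptr::copy has memmove semantics, like the memmove call above.
    core::ptr::copy(start as *const u8, ENTRYOTHER_ADDR as *mut u8, size);
}
```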

@@ -46,23 +46,22 @@ mod arch;
// The entry point of Rust kernel
#[no_mangle]
pub extern "C" fn rust_main(multiboot_information_address: usize) {
pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
arch::cpu::init();
// ATTENTION: we have a very small stack and no guard page
println!("Hello World{}", "!");
let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
arch::init();
// set up guard page and map the heap pages
let mut memory_controller = memory::init(boot_info);
unsafe {
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE);
HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE);
}
let double_fault_stack = memory_controller.alloc_stack(1)
.expect("could not allocate double fault stack");
arch::gdt::init(double_fault_stack.top());
arch::gdt::init();
arch::idt::init();
test!(global_allocator);
@@ -78,15 +77,17 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
loop{}
test_end!();
unreachable!();
}
#[no_mangle]
pub extern "C" fn other_main() {
// print OK
unsafe{ *(0xb8000 as *mut u32) = 0x2f4b2f4f; }
loop {}
// FIXME: Page Fault
pub extern "C" fn other_main() -> ! {
arch::cpu::init();
arch::gdt::init();
arch::idt::init();
println!("Hello world! from AP!");
unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
loop {}
}
use linked_list_allocator::LockedHeap;
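
The one-line heap fix in `rust_main` matters because `linked_list_allocator`'s `Heap::init` takes a start address and a size, not an end address; the old call `init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE)` therefore declared a heap reaching far past the mapped pages. A minimal sketch of the corrected setup (constant values are placeholders, not the real `consts` values):

```rust
use linked_list_allocator::LockedHeap;

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();

// Placeholder values; the real constants live in `consts`.
const KERNEL_HEAP_OFFSET: usize = 0xffff_ff00_0000_0000;
const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MiB

unsafe fn init_heap() {
    // init(start, size), not init(start, end).
    HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE);
}
```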
