Now CPU1 can handle interrupts. Alloc TSS & GDT & IDT at the kernel heap.

Branch: master
Author: WangRunji, 7 years ago
commit f6df3c412c (parent 3e9ee46850)

@@ -1,3 +1,8 @@
+pub fn init() {
+    enable_nxe_bit();
+    enable_write_protect_bit();
+}
+
 /// Enable 'No-Execute' bit in page entry
 pub fn enable_nxe_bit() {
     use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
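
The hunk above cuts off after the first line of enable_nxe_bit(). For reference, a sketch of what the two helpers presumably look like, following the classic Blog OS pattern on the pre-0.2 `x86_64` crate API that the visible `use` line implies; the bodies below are an assumption, not part of this commit:

    // Presumed bodies of the helpers called by the new cpu::init().
    pub fn enable_nxe_bit() {
        use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
        let nxe_bit = 1 << 11; // EFER.NXE: honor NO_EXECUTE in page entries
        unsafe { wrmsr(IA32_EFER, rdmsr(IA32_EFER) | nxe_bit) };
    }

    pub fn enable_write_protect_bit() {
        use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
        // CR0.WP: ring 0 may no longer write through read-only mappings
        unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
    }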
@@ -26,7 +31,7 @@ pub fn enable_write_protect_bit() {
 /// The error code is `value written to 0x501` *2 +1, so it should be odd
 pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
     use x86_64::instructions::port::outb;
-    assert!(error_code & 1 == 1);
+    assert_eq!(error_code & 1, 1, "error code should be odd");
     outb(0x501, (error_code - 1) / 2);
     unreachable!()
 }
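
A note on exit_in_qemu(): QEMU's isa-debug-exit device turns a write of v into host exit status v*2+1, so only odd statuses are reachable, which is exactly what the strengthened assert_eq! documents. This assumes QEMU is started with something like `-device isa-debug-exit,iobase=0x501,iosize=1`. A usage sketch:

    // Exit QEMU with status 3: writes (3 - 1) / 2 = 1 to port 0x501,
    // and isa-debug-exit reports (1 << 1) | 1 = 3 to the host.
    unsafe { exit_in_qemu(3) }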

@@ -4,25 +4,33 @@ use x86_64::structures::tss::TaskStateSegment;
 use x86_64::structures::gdt::SegmentSelector;
 use x86_64::{PrivilegeLevel, VirtualAddress};
 use spin::Once;
+use alloc::boxed::Box;
 
-static TSS: Once<TaskStateSegment> = Once::new();
-static GDT: Once<Gdt> = Once::new();
-
-pub fn init(double_fault_stack_top: usize) {
+/// Alloc TSS & GDT at kernel heap, then init and load it.
+/// The double fault stack will be allocated at kernel heap too.
+pub fn init() {
     use x86_64::structures::gdt::SegmentSelector;
     use x86_64::instructions::segmentation::set_cs;
     use x86_64::instructions::tables::load_tss;
 
-    let tss = TSS.call_once(|| {
+    struct DoubleFaultStack {
+        space: [u8; 4096]
+    }
+    let double_fault_stack_top = Box::into_raw(Box::new(
+        DoubleFaultStack{space: [0; 4096]}
+    )) as usize + 4096;
+
+    let mut tss = Box::new({
         let mut tss = TaskStateSegment::new();
         tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX]
             = VirtualAddress(double_fault_stack_top);
         tss
     });
+    let tss = unsafe{ &*Box::into_raw(tss) };
 
     let mut code_selector = SegmentSelector(0);
     let mut tss_selector = SegmentSelector(0);
-    let gdt = GDT.call_once(|| {
+    let gdt = Box::new({
         let mut gdt = Gdt::new();
         gdt.add_entry(GNULL);
         code_selector =
@@ -33,6 +41,7 @@ pub fn init(double_fault_stack_top: usize) {
         tss_selector = gdt.add_entry(Descriptor::tss_segment(&tss));
         gdt
     });
+    let gdt = unsafe{ &*Box::into_raw(gdt) };
     gdt.load();
     unsafe {
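
The unsafe `&*Box::into_raw(..)` lines are deliberate leaks: gdt.load() and load_tss() hand the CPU pointers that must remain valid for the rest of the kernel's lifetime, and under SMP each core now builds its own GDT and TSS instead of sharing one `static ... Once`. A hypothetical helper (not in the commit) makes the pattern explicit:

    use alloc::boxed::Box;

    /// Leak a Box, yielding a reference that lives forever.
    fn leak_static<T>(b: Box<T>) -> &'static T {
        unsafe { &*Box::into_raw(b) } // never freed, by design
    }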

@@ -1,10 +1,10 @@
 use x86_64::structures::idt::Idt;
 use spin::Once;
+use alloc::boxed::Box;
 
-static IDT: Once<Idt> = Once::new();
-
+/// Alloc IDT at kernel heap, then init and load it.
 pub fn init() {
-    let idt = IDT.call_once(|| {
+    let idt = Box::new({
         use arch::interrupt::irq::*;
         use consts::irq::*;
         use arch::gdt::DOUBLE_FAULT_IST_INDEX;
@@ -22,6 +22,7 @@ pub fn init() {
         }
         idt
     });
+    let idt = unsafe{ &*Box::into_raw(idt) };
     idt.load();
 }
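
Same pattern for the IDT: in this era of the x86_64 crate, Idt::load takes `&'static self` because the CPU keeps reading the table after lidt, and the leaked Box provides exactly that. On Rust 1.26+ the leak can be written without unsafe (a hypothetical modernization, not in the commit):

    let idt: &'static mut Idt = Box::leak(idt);
    idt.load();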

@@ -4,9 +4,4 @@ pub mod interrupt;
 pub mod paging;
 pub mod gdt;
 pub mod idt;
 pub mod smp;
-
-pub fn init() {
-    cpu::enable_nxe_bit();
-    cpu::enable_write_protect_bit();
-}
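
With arch::init() gone, the NXE/write-protect setup is no longer something only the boot CPU does: both rust_main (the BSP) and other_main (each AP) call arch::cpu::init() themselves, as the lib.rs hunks below show.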

@@ -20,12 +20,11 @@ pub fn start_other_cores(acpi: &ACPI_Result, mc: &mut MemoryController) {
     for i in 1 .. acpi.cpu_num {
         let apic_id = acpi.cpu_acpi_ids[i as usize];
         *args = EntryArgs {
-            kstack: mc.alloc_stack(1).unwrap().top() as u64,
+            kstack: mc.alloc_stack(7).unwrap().top() as u64,
             page_table: page_table,
             stack: 0x8000, // just enough stack to get us to entry64mp
         };
         start_ap(apic_id, ENTRYOTHER_ADDR);
-        loop{}
     }
 }
@@ -35,6 +34,7 @@ fn copy_entryother() {
     let entryother_start = entryother_start as usize;
     let entryother_end = entryother_end as usize;
     let size = entryother_end - entryother_start;
+    assert!(size <= 0x1000, "entryother code is too large, not supported.");
     unsafe{ memmove(ENTRYOTHER_ADDR as *mut u8, entryother_start as *mut u8, size); }
     debug!("smp: copied entryother code to 0x7000");
 }
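
Two fixes in the SMP module. First, the stray `loop{}` after start_ap() made the boot CPU spin forever after waking the first AP, so no other core ever started and rust_main never continued; removing it lets the loop wake every core. Second, each AP's kernel stack grows from 1 page to 7, presumably because a core that now takes interrupts (the point of this commit) needs more than 4 KiB of stack. The new assert documents the constraint that the copied entryother bootstrap code must fit in the single page at ENTRYOTHER_ADDR (0x7000).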

@@ -46,23 +46,22 @@ mod arch;
 // The entry point of Rust kernel
 #[no_mangle]
-pub extern "C" fn rust_main(multiboot_information_address: usize) {
+pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
+    arch::cpu::init();
     // ATTENTION: we have a very small stack and no guard page
     println!("Hello World{}", "!");
     let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
-    arch::init();
 
     // set up guard page and map the heap pages
     let mut memory_controller = memory::init(boot_info);
     unsafe {
         use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
-        HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE);
+        HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE);
     }
 
-    let double_fault_stack = memory_controller.alloc_stack(1)
-        .expect("could not allocate double fault stack");
-    arch::gdt::init(double_fault_stack.top());
+    arch::gdt::init();
     arch::idt::init();
 
     test!(global_allocator);
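
The heap change is a genuine bug fix: linked_list_allocator's Heap::init takes (heap_bottom, heap_size), not (start, end), so the old call overstated the heap size by KERNEL_HEAP_OFFSET bytes. And with the double fault stack now allocated inside gdt::init() itself, on the freshly initialized heap, rust_main no longer needs to allocate and pass one in.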
@@ -78,15 +77,17 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
     loop{}
     test_end!();
+    unreachable!();
 }
 
 #[no_mangle]
-pub extern "C" fn other_main() {
-    // print OK
-    unsafe{ *(0xb8000 as *mut u32) = 0x2f4b2f4f; }
-    loop {}
-    // FIXME: Page Fault
+pub extern "C" fn other_main() -> ! {
+    arch::cpu::init();
+    arch::gdt::init();
+    arch::idt::init();
     println!("Hello world! from AP!");
+    unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
+    loop {}
 }
 
 use linked_list_allocator::LockedHeap;
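
other_main now mirrors the BSP's per-CPU setup and then deliberately dereferences the bogus address 0xdeadbeaf (sic, as written in the source) to demonstrate the commit title: CPU1 has its own GDT, TSS, and IDT on the kernel heap and can take a page fault without triple-faulting. The new `-> !` return types are backed by the trailing `loop {}` here and, in rust_main, by the `unreachable!()` after test_end!(), which presumably never returns.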
