Move more to x86_64 arch
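Move the x86_64-specific boot path out of the shared code: frame allocator
setup and kernel remapping go from memory/mod.rs to arch/x86_64/memory.rs,
the boot sequence and the AP entry point other_main move from lib.rs into
arch/x86_64/mod.rs, and memory::init is split into an arch-specific init
plus a portable init_heap().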

toolchain_update
WangRunji 7 years ago
parent d3ed84ba61
commit f7d75696bc

arch/x86_64/driver/mod.rs
@@ -8,12 +8,9 @@ pub mod keyboard;
 pub mod pit;
 pub mod ide;
-pub fn init(rsdt_addr: usize, mut page_map: impl FnMut(usize, usize)) -> acpi::AcpiResult {
+pub fn init(rsdt_addr: usize) -> acpi::AcpiResult {
     assert_has_not_been_called!();
-    page_map(0x07fe1000, 1); // RSDT
-    page_map(0xfec00000, 1); // IOAPIC
     let acpi = acpi::init(rsdt_addr).expect("Failed to init ACPI");
     assert_eq!(acpi.lapic_addr as usize, 0xfee00000);
     trace!("acpi = {:?}", acpi);

arch/x86_64/memory.rs (new file)
@@ -0,0 +1,116 @@
use bit_allocator::{BitAlloc, BitAlloc64K};
use consts::KERNEL_OFFSET;
// Depends on kernel
use memory::{active_table, FRAME_ALLOCATOR, init_heap, MemoryArea, MemoryAttr, MemorySet, Stack};
use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
use multiboot2::BootInformation;
use ucore_memory::PAGE_SIZE;
use ucore_memory::paging::PageTable;
// BootInformation may trigger page fault after kernel remap
// So just take its ownership
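// (a &BootInformation borrow held across the remap could dangle once the new
// page table drops the old mapping of the multiboot area in low memory)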
pub fn init(boot_info: BootInformation) -> MemorySet {
    assert_has_not_been_called!("memory::init must be called only once");
    info!("{:?}", boot_info);
    init_frame_allocator(&boot_info);
    let ms = remap_the_kernel(&boot_info);
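    // init_heap() must come after the remap: the heap range (the
    // "kernel_heap" area below) is only mapped by the page table that
    // remap_the_kernel() activates.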
    init_heap();
    ms
}

fn init_frame_allocator(boot_info: &BootInformation) {
    let memory_areas = boot_info.memory_map_tag().expect("Memory map tag required")
        .memory_areas();
    let elf_sections = boot_info.elf_sections_tag().expect("Elf sections tag required")
        .sections().filter(|s| s.is_allocated());

    let mut ba = FRAME_ALLOCATOR.lock();
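    // The bit allocator tracks free physical frames: insert() marks a range
    // of frames available, remove() reserves it, so the kernel image and the
    // multiboot info itself are never handed out.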
    for area in memory_areas {
        ba.insert(to_range(area.start_address(), area.end_address()));
    }
    for section in elf_sections {
        ba.remove(to_range(section.start_address() as usize, section.end_address() as usize));
    }
    ba.remove(to_range(boot_info.start_address(), boot_info.end_address()));

    use core::ops::Range;
    fn to_range(mut start_addr: usize, mut end_addr: usize) -> Range<usize> {
        use consts::KERNEL_OFFSET;
        if start_addr >= KERNEL_OFFSET {
            start_addr -= KERNEL_OFFSET;
        }
        if end_addr >= KERNEL_OFFSET {
            end_addr -= KERNEL_OFFSET;
        }
        let page_start = start_addr / PAGE_SIZE;
        let mut page_end = (end_addr - 1) / PAGE_SIZE + 1;
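        // (end_addr - 1) / PAGE_SIZE + 1 rounds up, so a range ending
        // mid-page still covers its last frame: e.g. end_addr = 0x1801 with
        // PAGE_SIZE = 0x1000 gives page_end = 2.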
        if page_end >= BitAlloc64K::CAP {
            warn!("page num {:#x} out of range {:#x}", page_end, BitAlloc64K::CAP);
            page_end = BitAlloc64K::CAP;
        }
        page_start..page_end
    }
}

fn remap_the_kernel(boot_info: &BootInformation) -> MemorySet {
    extern { fn stack_bottom(); }
    let stack_bottom = stack_bottom as usize + KERNEL_OFFSET;
    let kstack = Stack {
        top: stack_bottom + 8 * PAGE_SIZE,
        bottom: stack_bottom + 1 * PAGE_SIZE,
    };
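    // bottom starts one page above the linker-provided stack_bottom symbol:
    // page 0 of the stack is unmapped below to serve as a guard page.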
    let mut memory_set = memory_set_from(boot_info.elf_sections_tag().unwrap(), kstack);

    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
    use super::smp::ENTRYOTHER_ADDR;
    memory_set.push(MemoryArea::new_physical(0xb8000, 0xb9000, KERNEL_OFFSET, MemoryAttr::default(), "VGA"));
    memory_set.push(MemoryArea::new_physical(0xfee00000, 0xfee01000, KERNEL_OFFSET, MemoryAttr::default(), "LAPIC"));
    memory_set.push(MemoryArea::new_identity(0x07fe1000, 0x07fe1000 + PAGE_SIZE, MemoryAttr::default(), "RSDT"));
    memory_set.push(MemoryArea::new_identity(0xfec00000, 0xfec00000 + PAGE_SIZE, MemoryAttr::default(), "IOAPIC"));
    memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
    memory_set.push(MemoryArea::new_identity(ENTRYOTHER_ADDR, ENTRYOTHER_ADDR + PAGE_SIZE, MemoryAttr::default().execute(), "entry_other.text"));
    memory_set.push(MemoryArea::new_physical(0, 4096, KERNEL_OFFSET, MemoryAttr::default(), "entry_other.ctrl"));
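    // 0xfee00000 is the architectural default LAPIC MMIO base (checked by the
    // assert in driver::init). entry_other.text is identity-mapped so the AP
    // startup stub at ENTRYOTHER_ADDR keeps running at the same address before
    // and after paging is enabled; entry_other.ctrl maps physical page 0,
    // presumably for the BDA warm-reset vector that an xv6-style start_ap()
    // programs.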

    debug!("{:#x?}", memory_set);
    unsafe { memory_set.activate(); }
    info!("NEW TABLE!!!");

    // turn the stack bottom into a guard page
    active_table().unmap(stack_bottom);
    debug!("guard page at {:x?}", stack_bottom);

    memory_set
}

fn memory_set_from(sections: ElfSectionsTag, kstack: Stack) -> MemorySet {
    assert_has_not_been_called!();
    // WARNING: must ensure it's large enough
    static mut SPACE: [u8; 0x1000] = [0; 0x1000];
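    // A static buffer backs the MemorySet because this runs before
    // init_heap(): there is no heap allocator yet when the kernel is remapped.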
    let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
    for section in sections.sections().filter(|s| s.is_allocated()) {
        set.push(memory_area_from(section));
    }
    set
}

fn memory_area_from(section: ElfSection) -> MemoryArea {
    let mut start_addr = section.start_address() as usize;
    let mut end_addr = section.end_address() as usize;
    assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
    let name = unsafe { &*(section.name() as *const str) };
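    // The cast strips the lifetime tied to `section`, yielding a &'static
    // str; this is sound only while the ELF string table behind it stays
    // mapped.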
    if start_addr >= KERNEL_OFFSET {
        start_addr -= KERNEL_OFFSET;
        end_addr -= KERNEL_OFFSET;
    }
    MemoryArea::new_physical(start_addr, end_addr, KERNEL_OFFSET, memory_attr_from(section.flags()), name)
}

fn memory_attr_from(elf_flags: ElfSectionFlags) -> MemoryAttr {
    let mut flags = MemoryAttr::default();
    if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
    if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
    if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
    flags
}

arch/x86_64/mod.rs
@@ -1,7 +1,36 @@
use memory::MemorySet;
use multiboot2;
pub mod driver;
pub mod cpu;
pub mod interrupt;
pub mod paging;
pub mod gdt;
pub mod idt;
pub mod smp;
pub mod memory;

pub fn init(multiboot_information_address: usize) -> MemorySet {
    idt::init();
    let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
    let rsdt_addr = boot_info.rsdp_v1_tag().unwrap().rsdt_address();
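    // Grab the RSDT address while the multiboot structures are still
    // reachable: memory::init takes boot_info by value and remaps the kernel
    // (see the ownership note in memory.rs).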
    let ms = memory::init(boot_info);
    // Now heap is available
    gdt::init();
    let acpi = driver::init(rsdt_addr);
    smp::start_other_cores(&acpi);
    ms
}

/// The entry point for other processors (APs)
#[no_mangle]
pub extern "C" fn other_main() -> ! {
    idt::init();
    gdt::init();
    driver::apic::other_init();
    let cpu_id = driver::apic::lapic_id();
    let ms = unsafe { smp::notify_started(cpu_id) };
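    // notify_started() switches this core onto the kernel page table and
    // sets the STARTED flag that start_other_cores() is spinning on.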
    println!("Hello world! from CPU {}!", cpu_id);
    // unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
    loop {}
}

arch/x86_64/smp.rs
@@ -4,13 +4,9 @@ use core::ptr::{read_volatile, write_volatile};
 use memory::*;
 use x86_64::registers::control::Cr3;

-const ENTRYOTHER_ADDR: u32 = 0x7000;
-pub fn start_other_cores(acpi: &AcpiResult, ms: &mut MemorySet) {
-    use consts::KERNEL_OFFSET;
-    ms.push(MemoryArea::new_identity(ENTRYOTHER_ADDR as usize, ENTRYOTHER_ADDR as usize + 1, MemoryAttr::default().execute(), "entry_other.text"));
-    ms.push(MemoryArea::new_physical(0, 4096, KERNEL_OFFSET, MemoryAttr::default(), "entry_other.ctrl"));
+pub const ENTRYOTHER_ADDR: usize = 0x7000;
+pub fn start_other_cores(acpi: &AcpiResult) {
     let args = unsafe { &mut *(0x8000 as *mut EntryArgs).offset(-1) };
     for i in 1 .. acpi.cpu_num {
         let apic_id = acpi.cpu_acpi_ids[i as usize];
@@ -21,7 +17,7 @@ pub fn start_other_cores(acpi: &AcpiResult, ms: &mut MemorySet) {
             stack: args as *const _ as u32, // just enough stack to get us to entry64mp
         };
         unsafe { MS = Some(ms); }
-        start_ap(apic_id, ENTRYOTHER_ADDR);
+        start_ap(apic_id, ENTRYOTHER_ADDR as u32);
         while unsafe { !read_volatile(&STARTED[i as usize]) } {}
     }
 }
@@ -39,5 +35,7 @@ static mut MS: Option<MemorySet> = None;
 pub unsafe fn notify_started(cpu_id: u8) -> MemorySet {
     write_volatile(&mut STARTED[cpu_id as usize], true);
-    MS.take().unwrap()
+    let ms = MS.take().unwrap();
+    ms.activate();
+    ms
 }

consts.rs
@@ -1,13 +1,13 @@
 #![allow(dead_code)]

-pub const MAX_CPU_NUM: usize = 8;
-pub const MAX_PROCESS_NUM: usize = 32;
 #[cfg(target_arch = "riscv")]
 pub use self::riscv::*;
 #[cfg(target_arch = "x86_64")]
 pub use self::x86_64::*;
+pub const MAX_CPU_NUM: usize = 8;
+pub const MAX_PROCESS_NUM: usize = 32;

 #[cfg(target_arch = "riscv")]
 mod riscv {
// Physical address available on THINPAD:
@@ -50,6 +50,9 @@ mod x86_64 {
     /// Size of kernel heap
     pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
+    pub const KERNEL_STACK_OFFSET: usize = KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE;
+    pub const KERNEL_STACK_SIZE: usize = 1 * 1024 * 1024; // 1 MB
     /// Offset to kernel percpu variables
     //TODO: Use 64-bit fs offset to enable this
     //pub const KERNEL_PERCPU_OFFSET: usize = KERNEL_HEAP_OFFSET - PML4_SIZE;
+    pub const KERNEL_PERCPU_OFFSET: usize = 0xC000_0000;

lib.rs
@@ -50,9 +50,12 @@ extern crate volatile;
 extern crate x86_64;
 extern crate xmas_elf;

 // Export to asm
 pub use arch::interrupt::rust_trap;
+#[cfg(target_arch = "x86_64")]
 pub use arch::interrupt::set_return_rsp;
+#[cfg(target_arch = "x86_64")]
+pub use arch::other_main;
 use linked_list_allocator::LockedHeap;

 #[macro_use] // print!
@@ -93,6 +96,7 @@ mod arch;
 #[cfg(target_arch = "riscv")]
 pub extern fn rust_main() -> ! {
     arch::init();
+    memory::init_heap();
     loop {}
 }
@@ -100,54 +104,23 @@ pub extern fn rust_main() -> ! {
 #[no_mangle]
 #[cfg(target_arch = "x86_64")]
 pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
-    arch::idt::init();
-    io::init();
-    // ATTENTION: we have a very small stack and no guard page
-    println!("Hello World{}", "!");
-    let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
-    let rsdt_addr = boot_info.rsdp_v1_tag().unwrap().rsdt_address();
-    // set up guard page and map the heap pages
-    let mut kernel_memory = memory::init(boot_info);
-    arch::gdt::init();
-    memory::test::cow();
-    let acpi = arch::driver::init(rsdt_addr, |addr: usize, count: usize| {
-        use memory::*;
-        kernel_memory.push(MemoryArea::new_identity(addr, addr + count * 0x1000, MemoryAttr::default(), "acpi"))
-    });
+    io::init();
+    let ms = arch::init(multiboot_information_address);
-    arch::smp::start_other_cores(&acpi, &mut kernel_memory);
-    process::init(kernel_memory);
+    process::init(ms);
     fs::load_sfs();
-    unsafe{ arch::interrupt::enable(); }
+    unsafe { arch::interrupt::enable(); }
     // thread::test::unpack();
     // sync::test::philosopher_using_mutex();
     // sync::test::philosopher_using_monitor();
-    sync::mpsc::test::test_all();
+    // sync::mpsc::test::test_all();
     loop {}
 }
-/// The entry point for another processors
-#[no_mangle]
-#[cfg(target_arch = "x86_64")]
-pub extern "C" fn other_main() -> ! {
-    arch::gdt::init();
-    arch::idt::init();
-    arch::driver::apic::other_init();
-    let cpu_id = arch::driver::apic::lapic_id();
-    let ms = unsafe { arch::smp::notify_started(cpu_id) };
-    unsafe { ms.activate(); }
-    println!("Hello world! from CPU {}!", arch::driver::apic::lapic_id());
-    // unsafe{ let a = *(0xdeadbeaf as *const u8); } // Page fault
-    loop {}
-}

memory/mod.rs
@@ -1,22 +1,19 @@
 pub use arch::paging::*;
 use bit_allocator::{BitAlloc, BitAlloc64K};
-use consts::KERNEL_OFFSET;
-use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
-use multiboot2::BootInformation;
-pub use self::stack_allocator::*;
+use self::stack_allocator::*;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
-use ucore_memory::{*, paging::PageTable, cow::CowExt};
-pub use ucore_memory::memory_set::{MemoryAttr, MemoryArea, MemorySet as MemorySet_, Stack};
+use ucore_memory::{*, cow::CowExt, paging::PageTable};
+pub use ucore_memory::memory_set::{MemoryArea, MemoryAttr, MemorySet as MemorySet_, Stack};

 pub type MemorySet = MemorySet_<InactivePageTable0>;

 mod stack_allocator;

 lazy_static! {
-    static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
+    pub static ref FRAME_ALLOCATOR: Mutex<BitAlloc64K> = Mutex::new(BitAlloc64K::default());
 }

-static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);
+pub static STACK_ALLOCATOR: Mutex<Option<StackAllocator>> = Mutex::new(None);

 pub fn alloc_frame() -> Option<usize> {
     FRAME_ALLOCATOR.lock().alloc().map(|id| id * PAGE_SIZE)
@@ -50,125 +47,21 @@ pub fn page_fault_handler(addr: usize) -> bool {
     active_table().page_fault_handler(addr, || alloc_frame().unwrap())
 }

-pub fn init(boot_info: BootInformation) -> MemorySet {
-    assert_has_not_been_called!("memory::init must be called only once");
-    info!("{:?}", boot_info);
-    init_frame_allocator(&boot_info);
-    let kernel_memory = remap_the_kernel(boot_info);
-    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
+pub fn init_heap() {
+    use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE, KERNEL_STACK_OFFSET, KERNEL_STACK_SIZE};
     unsafe { HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE); }
     *STACK_ALLOCATOR.lock() = Some({
         use ucore_memory::Page;
-        let stack_alloc_range = Page::range_of(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE,
-                                               KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE + 0x1000000);
-        stack_allocator::StackAllocator::new(stack_alloc_range)
+        StackAllocator::new(Page::range_of(KERNEL_STACK_OFFSET, KERNEL_STACK_OFFSET + KERNEL_STACK_SIZE))
     });
-    kernel_memory
 }
-fn init_frame_allocator(boot_info: &BootInformation) {
-    let memory_areas = boot_info.memory_map_tag().expect("Memory map tag required")
-        .memory_areas();
-    let elf_sections = boot_info.elf_sections_tag().expect("Elf sections tag required")
-        .sections().filter(|s| s.is_allocated());
-    let mut ba = FRAME_ALLOCATOR.lock();
-    for area in memory_areas {
-        ba.insert(to_range(area.start_address(), area.end_address()));
-    }
-    for section in elf_sections {
-        ba.remove(to_range(section.start_address() as usize, section.end_address() as usize));
-    }
-    ba.remove(to_range(boot_info.start_address(), boot_info.end_address()));
-    use core::ops::Range;
-    fn to_range(mut start_addr: usize, mut end_addr: usize) -> Range<usize> {
-        use consts::KERNEL_OFFSET;
-        if start_addr >= KERNEL_OFFSET {
-            start_addr -= KERNEL_OFFSET;
-        }
-        if end_addr >= KERNEL_OFFSET {
-            end_addr -= KERNEL_OFFSET;
-        }
-        let page_start = start_addr / PAGE_SIZE;
-        let mut page_end = (end_addr - 1) / PAGE_SIZE + 1;
-        if page_end >= BitAlloc64K::CAP {
-            warn!("page num {:#x} out of range {:#x}", page_end, BitAlloc64K::CAP);
-            page_end = BitAlloc64K::CAP;
-        }
-        page_start..page_end
-    }
-}
-fn remap_the_kernel(boot_info: BootInformation) -> MemorySet {
-    extern { fn stack_bottom(); }
-    let stack_bottom = stack_bottom as usize + KERNEL_OFFSET;
-    let kstack = Stack {
-        top: stack_bottom + 8 * PAGE_SIZE,
-        bottom: stack_bottom + 1 * PAGE_SIZE,
-    };
-    let mut memory_set = memory_set_from(boot_info.elf_sections_tag().unwrap(), kstack);
-    use consts::{KERNEL_OFFSET, KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
-    memory_set.push(MemoryArea::new_physical(0xb8000, 0xb9000, KERNEL_OFFSET, MemoryAttr::default(), "VGA"));
-    memory_set.push(MemoryArea::new_physical(0xfee00000, 0xfee01000, KERNEL_OFFSET, MemoryAttr::default(), "LAPIC"));
-    memory_set.push(MemoryArea::new(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE, MemoryAttr::default(), "kernel_heap"));
-    debug!("{:#x?}", memory_set);
-    unsafe { memory_set.activate(); }
-    info!("NEW TABLE!!!");
-    // turn the stack bottom into a guard page
-    active_table().unmap(stack_bottom);
-    debug!("guard page at {:?}", stack_bottom);
-    memory_set
-}
-fn memory_set_from(sections: ElfSectionsTag, kstack: Stack) -> MemorySet {
-    assert_has_not_been_called!();
-    // WARNING: must ensure it's large enough
-    static mut SPACE: [u8; 0x1000] = [0; 0x1000];
-    let mut set = unsafe { MemorySet::new_from_raw_space(&mut SPACE, kstack) };
-    for section in sections.sections().filter(|s| s.is_allocated()) {
-        set.push(memory_area_from(section));
-    }
-    set
-}
-fn memory_area_from(section: ElfSection) -> MemoryArea {
-    let mut start_addr = section.start_address() as usize;
-    let mut end_addr = section.end_address() as usize;
-    assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");
-    let name = unsafe { &*(section.name() as *const str) };
-    if start_addr >= KERNEL_OFFSET {
-        start_addr -= KERNEL_OFFSET;
-        end_addr -= KERNEL_OFFSET;
-    }
-    MemoryArea::new_physical(start_addr, end_addr, KERNEL_OFFSET, memory_attr_from(section.flags()), name)
-}
-fn memory_attr_from(elf_flags: ElfSectionFlags) -> MemoryAttr {
-    let mut flags = MemoryAttr::default();
-    if !elf_flags.contains(ElfSectionFlags::ALLOCATED) { flags = flags.hide(); }
-    if !elf_flags.contains(ElfSectionFlags::WRITABLE) { flags = flags.readonly(); }
-    if elf_flags.contains(ElfSectionFlags::EXECUTABLE) { flags = flags.execute(); }
-    flags
-}
-pub mod test {
-    pub fn cow() {
-        use super::*;
-        use ucore_memory::cow::test::test_with;
-        test_with(&mut active_table());
-    }
-}
+//pub mod test {
+//    pub fn cow() {
+//        use super::*;
+//        use ucore_memory::cow::test::test_with;
+//        test_with(&mut active_table());
+//    }
+//}