Fix rsp: translate it to a kernel virtual address after entering long mode. Use the PhysicalAddress type for physical addresses.

toolchain_update
WangRunji 7 years ago
parent ca683e4787
commit 03d4adfcfe

@ -1,6 +1,8 @@
global long_mode_start
extern rust_main
KERNEL_OFFSET equ 0xffff_ff00_0000_0000
section .text32
bits 64
long_mode_start:
@ -12,6 +14,10 @@ long_mode_start:
mov fs, ax
mov gs, ax
; translate rsp to virtual address
mov rax, KERNEL_OFFSET
add rsp, rax
; call the rust main
extern rust_main
mov rax, rust_main

@ -19,6 +19,8 @@ pub const MAX_CPU_NUM: usize = 8;
/// Base virtual address of the kernel: one PML4 slot below RECURSIVE_PAGE_OFFSET.
pub const KERNEL_OFFSET: usize = RECURSIVE_PAGE_OFFSET - PML4_SIZE;
/// Index of the PML4 entry covering KERNEL_OFFSET.
pub const KERNEL_PML4: usize = (KERNEL_OFFSET & PML4_MASK)/PML4_SIZE;
/// Size of the kernel's virtual region: one full PML4 slot.
pub const KERNEL_SIZE: usize = PML4_SIZE;
/// Offset to kernel heap
pub const KERNEL_HEAP_OFFSET: usize = KERNEL_OFFSET - PML4_SIZE;
/// Index of the PML4 entry covering KERNEL_HEAP_OFFSET.
pub const KERNEL_HEAP_PML4: usize = (KERNEL_HEAP_OFFSET & PML4_MASK)/PML4_SIZE;

@ -50,16 +50,17 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
println!("Hello World{}", "!");
let boot_info = unsafe { multiboot2::load(multiboot_information_address) };
arch::init();
arch::init();
// set up guard page and map the heap pages
let mut memory_controller = memory::init(boot_info);
unsafe {
HEAP_ALLOCATOR.lock().init(HEAP_START, HEAP_START + HEAP_SIZE);
}
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
HEAP_ALLOCATOR.lock().init(KERNEL_HEAP_OFFSET, KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE);
}
// initialize our IDT
interrupts::init(&mut memory_controller);
interrupts::init(&mut memory_controller);
test!(global_allocator);
test!(guard_page);
@ -78,9 +79,6 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) {
use linked_list_allocator::LockedHeap;
// Old hard-coded heap location/size; this commit replaces their uses with
// consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE} (see rust_main above).
pub const HEAP_START: usize = 0o_000_001_000_000_0000;
pub const HEAP_SIZE: usize = 100 * 1024; // 100 KiB
/// Global allocator backed by a lock-protected linked-list heap;
/// initialized in rust_main via HEAP_ALLOCATOR.lock().init(...).
#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();

@ -1,4 +1,4 @@
use memory::{Frame, FrameAllocator};
use memory::{Frame, FrameAllocator, PhysicalAddress};
use multiboot2::{MemoryAreaIter, MemoryArea};
pub struct AreaFrameAllocator {
@ -55,18 +55,18 @@ impl FrameAllocator for AreaFrameAllocator {
}
impl AreaFrameAllocator {
pub fn new(kernel_start: usize, kernel_end: usize,
multiboot_start: usize, multiboot_end: usize,
pub fn new(kernel_start: PhysicalAddress, kernel_end: PhysicalAddress,
multiboot_start: PhysicalAddress, multiboot_end: PhysicalAddress,
memory_areas: MemoryAreaIter) -> AreaFrameAllocator
{
let mut allocator = AreaFrameAllocator {
next_free_frame: Frame::containing_address(0),
current_area: None,
areas: memory_areas,
kernel_start: Frame::containing_address(kernel_start),
kernel_end: Frame::containing_address(kernel_end),
multiboot_start: Frame::containing_address(multiboot_start),
multiboot_end: Frame::containing_address(multiboot_end),
kernel_start: Frame::containing_address(kernel_start.0 as usize),
kernel_end: Frame::containing_address(kernel_end.0 as usize),
multiboot_start: Frame::containing_address(multiboot_start.0 as usize),
multiboot_end: Frame::containing_address(multiboot_end.0 as usize),
};
allocator.choose_next_area();
allocator

@ -1,7 +1,7 @@
pub use self::area_frame_allocator::AreaFrameAllocator;
pub use self::paging::remap_the_kernel;
pub use self::stack_allocator::Stack;
use self::paging::PhysicalAddress;
use self::paging::{PhysicalAddress, FromToVirtualAddress};
use multiboot2::BootInformation;
use consts::KERNEL_OFFSET;
@ -20,26 +20,28 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
let elf_sections_tag = boot_info.elf_sections_tag().expect(
"Elf sections tag required");
let kernel_start = elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.start_address()).min().unwrap() as usize;
let kernel_end = elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.end_address()).max().unwrap() as usize
- KERNEL_OFFSET;
let kernel_start = PhysicalAddress(elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.start_address()).min().unwrap() as u64);
let kernel_end = PhysicalAddress::from_kernel_virtual(elf_sections_tag.sections()
.filter(|s| s.is_allocated()).map(|s| s.end_address()).max().unwrap());
let boot_info_start = PhysicalAddress(boot_info.start_address() as u64);
let boot_info_end = PhysicalAddress(boot_info.end_address() as u64);
println!("kernel start: {:#x}, kernel end: {:#x}",
kernel_start,
kernel_end);
println!("multiboot start: {:#x}, multiboot end: {:#x}",
boot_info.start_address(),
boot_info.end_address());
boot_info_start,
boot_info_end);
println!("memory area:");
for area in memory_map_tag.memory_areas() {
println!(" addr: {:#x}, size: {:#x}", area.base_addr, area.length);
}
}
let mut frame_allocator = AreaFrameAllocator::new(
kernel_start, kernel_end,
boot_info.start_address(), boot_info.end_address(),
boot_info_start, boot_info_end,
memory_map_tag.memory_areas());
let mut active_table = paging::remap_the_kernel(&mut frame_allocator,
@ -48,10 +50,10 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
println!("{:?}", active_table);
use self::paging::Page;
use {HEAP_START, HEAP_SIZE};
use consts::{KERNEL_HEAP_OFFSET, KERNEL_HEAP_SIZE};
let heap_start_page = Page::containing_address(HEAP_START);
let heap_end_page = Page::containing_address(HEAP_START + HEAP_SIZE-1);
let heap_start_page = Page::containing_address(KERNEL_HEAP_OFFSET);
let heap_end_page = Page::containing_address(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE-1);
for page in Page::range_inclusive(heap_start_page, heap_end_page) {
active_table.map(page, paging::WRITABLE, &mut frame_allocator);
@ -64,7 +66,7 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
stack_alloc_end);
stack_allocator::StackAllocator::new(stack_alloc_range)
};
MemoryController {
active_table: active_table,
frame_allocator: frame_allocator,

@ -1,4 +1,4 @@
use consts::KERNEL_OFFSET;
use consts::{KERNEL_OFFSET, KERNEL_SIZE};
pub use x86_64::{PhysicalAddress};
pub type VirtualAddress = usize;
@ -13,9 +13,11 @@ impl FromToVirtualAddress for PhysicalAddress {
self.0 as usize
}
/// Translate this physical address into a kernel virtual address
/// by adding KERNEL_OFFSET.
///
/// Panics (via assert!) when the physical address is not below KERNEL_SIZE.
fn to_kernel_virtual(&self) -> VirtualAddress {
assert!((self.0 as usize) < KERNEL_SIZE);
self.0 as usize + KERNEL_OFFSET
}
/// Inverse of `to_kernel_virtual`: recover the physical address from a
/// kernel virtual address by subtracting KERNEL_OFFSET.
///
/// Panics (via assert!) unless addr lies in
/// [KERNEL_OFFSET, KERNEL_OFFSET + KERNEL_SIZE).
fn from_kernel_virtual(addr: VirtualAddress) -> Self {
assert!(addr >= KERNEL_OFFSET && addr < KERNEL_OFFSET + KERNEL_SIZE);
PhysicalAddress((addr - KERNEL_OFFSET) as u64)
}
}
Loading…
Cancel
Save