Update x86_64 to 0.2.3. FIXME: SMP

master
WangRunji 7 years ago
parent ce6c849f5b
commit 4817f69acb

@ -26,10 +26,10 @@ volatile = "0.1.0"
spin = "0.4.5"
multiboot2 = "0.5"
bitflags = "1.0"
x86_64 = "0.1.2"
x86_64 = "0.2.3"
once = "0.3.3"
linked_list_allocator = "0.5.0"
redox_syscall = "0.1.37"
redox_syscall = "0.1"
xmas-elf = "0.6"
arrayvec = { version = "0.4.7", default-features = false }
log = "0.4"

@ -5,24 +5,14 @@ pub fn init() {
/// Enable 'No-Execute' bit in page entry
pub fn enable_nxe_bit() {
use x86_64::registers::msr::{IA32_EFER, rdmsr, wrmsr};
let nxe_bit = 1 << 11;
// The EFER register is only allowed in kernel mode
// But we are in kernel mode. So it's safe.
unsafe {
let efer = rdmsr(IA32_EFER);
wrmsr(IA32_EFER, efer | nxe_bit);
}
use x86_64::registers::model_specific::*;
unsafe { Efer::update(|flags| flags.insert(EferFlags::NO_EXECUTE_ENABLE)); }
}
/// Enable write protection in kernel mode
pub fn enable_write_protect_bit() {
use x86_64::registers::control_regs::{cr0, cr0_write, Cr0};
// The CR0 register is only allowed in kernel mode
// But we are in kernel mode. So it's safe.
unsafe { cr0_write(cr0() | Cr0::WRITE_PROTECT) };
use x86_64::registers::control::*;
unsafe { Cr0::update(|flags| flags.insert(Cr0Flags::WRITE_PROTECT)); }
}
/// Exit qemu
@ -30,8 +20,8 @@ pub fn enable_write_protect_bit() {
/// Must run qemu with `-device isa-debug-exit`
/// Qemu's exit code is (value written to 0x501) * 2 + 1, so `error_code` must be odd
pub unsafe fn exit_in_qemu(error_code: u8) -> ! {
use x86_64::instructions::port::outb;
use x86_64::instructions::port::Port;
assert_eq!(error_code & 1, 1, "error code should be odd");
outb(0x501, (error_code - 1) / 2);
Port::new(0x501).write((error_code - 1) / 2);
unreachable!()
}
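
The raw MSR and CR0 bit twiddling above is replaced by the typed flag API of x86_64 0.2.x. A minimal sketch of that pattern (not part of this commit; `enable_kernel_protections` is a hypothetical name, the crate items are as in 0.2.3):

use x86_64::registers::control::{Cr0, Cr0Flags};
use x86_64::registers::model_specific::{Efer, EferFlags};

/// Sketch: enable NXE and WP with the 0.2.x read-modify-write helpers.
pub fn enable_kernel_protections() {
    unsafe {
        // update() reads the register, lets the closure mutate the flag set,
        // and writes the result back.
        Efer::update(|flags| flags.insert(EferFlags::NO_EXECUTE_ENABLE));
        Cr0::update(|flags| flags.insert(Cr0Flags::WRITE_PROTECT));
    }
}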

@ -2,7 +2,6 @@
//!
//! Borrowed from the Rucore project. Thanks GWord!
//! Port from ucore C code.
use x86_64::instructions::port;
use spin::Mutex;
lazy_static! {
@ -314,4 +313,16 @@ struct Channels {
const CHANNELS: [(u16, u16); 2] = [(IO_BASE0, IO_CTRL0), (IO_BASE1, IO_CTRL1)];
//const IO_BASE(ideno) (CHANNELS[(ideno) >> 1].base)
//const IO_CTRL(ideno) (CHANNELS[(ideno) >> 1].ctrl)
mod port {
use x86_64::instructions::port::Port;
pub unsafe fn inb(port: u16) -> u8 {
Port::new(port).read()
}
pub unsafe fn outb(port: u16, value: u8) {
Port::new(port).write(value)
}
}
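
Wrapping the typed Port in a small inb/outb shim keeps the rest of the IDE driver untouched after the free-standing port functions were removed in 0.2.x. A hypothetical call site (not from this diff; offset 7 is the standard ATA status register) stays exactly as before:

// Hypothetical: read the ATA status register through the compatibility shim above.
let status = unsafe { port::inb(IO_BASE0 + 7) };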

@ -1,8 +1,8 @@
use spin::Mutex;
use consts::KERNEL_OFFSET;
use core::ptr::Unique;
use spin::Mutex;
use volatile::Volatile;
use x86_64::instructions::port::{outw, outb};
use consts::KERNEL_OFFSET;
use x86_64::instructions::port::Port;
pub const VGA_BUFFER: Unique<VgaBuffer> = unsafe {
Unique::new_unchecked((KERNEL_OFFSET + 0xb8000) as *mut _)
@ -82,10 +82,10 @@ impl VgaBuffer {
let pos = row * BUFFER_WIDTH + col;
unsafe {
// Reference: Rustboot project
outw(0x3D4, 15u16); // WARNING verify should be u16
outb(0x3D5, pos as u8);
outw(0x3D4, 14u16);
outb(0x3D5, (pos >> 8) as u8);
Port::new(0x3d4).write(15u16);
Port::new(0x3d5).write(pos as u8);
Port::new(0x3d4).write(14u16);
Port::new(0x3d5).write((pos >> 8) as u8);
}
}
}
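
With the typed Port<T> API the transfer width is fixed by the value's type, so the old "verify should be u16" question becomes explicit at each call site. A byte-wide variant of the same cursor update, as a sketch only (not what this commit does; `set_cursor` is a hypothetical helper):

use x86_64::instructions::port::Port;

/// Sketch: program the VGA CRTC cursor-location registers with 8-bit writes.
unsafe fn set_cursor(pos: u16) {
    let mut index: Port<u8> = Port::new(0x3d4); // CRTC index register
    let mut data: Port<u8> = Port::new(0x3d5);  // CRTC data register
    index.write(0x0f);           // cursor location low
    data.write(pos as u8);
    index.write(0x0e);           // cursor location high
    data.write((pos >> 8) as u8);
}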

@ -4,7 +4,7 @@ use consts::MAX_CPU_NUM;
use core::fmt;
use core::fmt::Debug;
use spin::{Mutex, MutexGuard, Once};
use x86_64::{PrivilegeLevel, VirtualAddress};
use x86_64::{PrivilegeLevel, VirtAddr};
use x86_64::structures::gdt::SegmentSelector;
use x86_64::structures::tss::TaskStateSegment;
@ -22,7 +22,7 @@ pub fn init() {
// Set the stack address to switch to automatically on a Double Fault
tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX]
= VirtualAddress(double_fault_stack_top);
= VirtAddr::new(double_fault_stack_top as u64);
tss
});
@ -74,7 +74,7 @@ impl Cpu {
/// Must be called before every entry to user mode, to guarantee a correct return to kernel mode
pub fn set_ring0_rsp(&mut self, rsp: usize) {
trace!("gdt.set_ring0_rsp: {:#x}", rsp);
self.tss.privilege_stack_table[0] = VirtualAddress(rsp);
self.tss.privilege_stack_table[0] = VirtAddr::new(rsp as u64);
}
}
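
The 0.1.x VirtualAddress tuple struct is now VirtAddr, constructed from a u64, which is why the usize stack pointers are cast above. A sketch of the conversion this pattern amounts to (`to_virt` is a hypothetical helper, not in the commit):

use x86_64::VirtAddr;

/// Sketch: the kernel tracks addresses as usize; the 0.2.x structures want VirtAddr.
fn to_virt(addr: usize) -> VirtAddr {
    VirtAddr::new(addr as u64)
}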

@ -112,8 +112,8 @@ fn double_fault(tf: &TrapFrame) {
}
fn page_fault(tf: &mut TrapFrame) {
use x86_64::registers::control_regs::cr2;
let addr = cr2().0;
let addr: usize;
unsafe { asm!("mov %cr2, $0" : "=r" (addr)); }
error!("\nEXCEPTION: Page Fault @ {:#x}, code: {:#x}", addr, tf.error_code);
use memory::page_fault_handler;

@ -4,5 +4,4 @@ pub mod interrupt;
pub mod paging;
pub mod gdt;
pub mod idt;
pub mod smp;
pub mod syscall;
pub mod smp;

@ -39,7 +39,7 @@ pub use self::test::test_cow;
use spin::Mutex;
use super::*;
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
use x86_64::VirtAddr;
trait EntryCowExt {
fn is_shared(&self) -> bool;
@ -52,7 +52,7 @@ trait EntryCowExt {
pub trait PageTableCowExt {
fn map_to_shared(&mut self, page: Page, frame: Frame, flags: EntryFlags);
fn unmap_shared(&mut self, page: Page);
fn try_copy_on_write(&mut self, addr: VirtAddr) -> bool;
fn try_copy_on_write(&mut self, addr: usize) -> bool;
}
impl EntryCowExt for Entry {
@ -101,9 +101,9 @@ impl PageTableCowExt for ActivePageTable {
}
fn unmap_shared(&mut self, page: Page) {
self.entry_mut(page).reset();
tlb::flush(VirtualAddress(page.start_address()));
tlb::flush(VirtAddr::new(page.start_address() as u64));
}
fn try_copy_on_write(&mut self, addr: VirtAddr) -> bool {
fn try_copy_on_write(&mut self, addr: usize) -> bool {
let page = Page::of_addr(addr);
let entry = self.entry_mut(page);
if !entry.is_cow() {
@ -119,7 +119,7 @@ impl PageTableCowExt for ActivePageTable {
temp_data.copy_from_slice(page_data);
entry.copy_on_write(Some(alloc_frame()));
tlb::flush(VirtualAddress(page.start_address()));
tlb::flush(VirtAddr::new(page.start_address() as u64));
page_data.copy_from_slice(&temp_data);
}

@ -27,8 +27,8 @@ impl Entry {
}
pub fn set(&mut self, frame: Frame, flags: EntryFlags) {
assert_eq!(frame.start_address().0 & !0x000fffff_fffff000, 0);
self.0 = (frame.start_address().0) | flags.bits();
assert_eq!(frame.start_address().as_u64() & !0x000fffff_fffff000, 0);
self.0 = (frame.start_address().as_u64()) | flags.bits();
}
}

@ -25,7 +25,7 @@ impl Mapper {
pub fn translate(&self, virtual_address: VirtAddr) -> Option<PhysAddr> {
let offset = virtual_address % PAGE_SIZE;
self.translate_page(Page::of_addr(virtual_address))
.map(|frame| PhysAddr((frame.start_address().get() + offset) as u64))
.map(|frame| PhysAddr::new((frame.start_address().get() + offset) as u64))
}
pub fn translate_page(&self, page: Page) -> Option<Frame> {
@ -97,7 +97,7 @@ impl Mapper {
pub fn unmap(&mut self, page: Page) -> Frame
{
use x86_64::instructions::tlb;
use x86_64::VirtualAddress;
use x86_64::VirtAddr;
assert!(self.translate(page.start_address()).is_some());
@ -108,7 +108,7 @@ impl Mapper {
.expect("mapping code does not support huge pages");
let frame = p1[page.p1_index()].pointed_frame().unwrap();
p1[page.p1_index()].set_unused();
tlb::flush(VirtualAddress(page.start_address()));
tlb::flush(VirtAddr::new(page.start_address() as u64));
// TODO free p(1,2,3) table if empty
frame
}

@ -43,16 +43,16 @@ impl Page {
(self.number >> 0) & 0o777
}
pub fn range_inclusive(start: Page, end: Page) -> PageIter {
PageIter {
pub fn range_inclusive(start: Page, end: Page) -> PageRange {
PageRange {
start,
end,
}
}
/// Iterate over pages in the address range [begin, end)
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageIter {
PageIter {
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
PageRange {
start: Page::of_addr(begin),
end: Page::of_addr(end - 1),
}
@ -69,12 +69,12 @@ impl Add<usize> for Page {
#[derive(Clone)]
pub struct PageIter {
pub struct PageRange {
start: Page,
end: Page,
}
impl Iterator for PageIter {
impl Iterator for PageRange {
type Item = Page;
fn next(&mut self) -> Option<Page> {
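
PageIter is renamed to PageRange but keeps the same Iterator behaviour, so call sites only change the type name. A hypothetical use (constants from consts.rs, not part of this hunk):

// Hypothetical: walk every page of the kernel image with the renamed range type.
let pages: PageRange = Page::range_of(KERNEL_OFFSET, KERNEL_OFFSET + KERNEL_SIZE);
for page in pages {
    // map or inspect `page` here
}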
@ -116,12 +116,11 @@ impl ActivePageTable {
pub fn with(&mut self, table: &mut InactivePageTable, f: impl FnOnce(&mut Mapper))
{
use x86_64::instructions::tlb;
use x86_64::registers::control_regs;
use x86_64::registers::control;
let temporary_page = TemporaryPage::new();
{
let backup = Frame::of_addr(
control_regs::cr3().0 as usize);
let backup = Frame::of_addr(control::Cr3::read().0.start_address().get());
// map temporary_page to current p4 table
let p4_table = temporary_page.map_table_frame(backup.clone(), self);
@ -142,17 +141,19 @@ impl ActivePageTable {
}
pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
use x86_64::registers::control_regs;
debug!("switch table {:?} -> {:?}", Frame::of_addr(control_regs::cr3().0 as usize), new_table.p4_frame);
if new_table.p4_frame.start_address() == control_regs::cr3() {
use x86_64::structures::paging::PhysFrame;
use x86_64::registers::control::{Cr3, Cr3Flags};
debug!("switch table {:?} -> {:?}", Frame::of_addr(Cr3::read().0.start_address().get()), new_table.p4_frame);
if new_table.p4_frame.start_address() == Cr3::read().0.start_address() {
return new_table;
}
let old_table = InactivePageTable {
p4_frame: Frame::of_addr(control_regs::cr3().0 as usize),
p4_frame: Frame::of_addr(Cr3::read().0.start_address().get()),
};
unsafe {
control_regs::cr3_write(new_table.p4_frame.start_address());
Cr3::write(PhysFrame::containing_address(new_table.p4_frame.start_address()),
Cr3Flags::empty());
}
use core::mem::forget;
forget(new_table);
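
control_regs::cr3()/cr3_write() are gone in 0.2.x; Cr3::read() returns the level-4 PhysFrame together with the CR3 flags, and Cr3::write() takes them back. A minimal sketch of that API (assuming x86_64 0.2.3; `load_p4` is a hypothetical function, not code from this commit):

use x86_64::PhysAddr;
use x86_64::registers::control::Cr3;
use x86_64::structures::paging::PhysFrame;

/// Sketch: load a new P4 table while preserving the current CR3 flags.
unsafe fn load_p4(p4_start: PhysAddr) {
    let (_old_frame, old_flags) = Cr3::read();          // current P4 frame + flags
    let new_frame = PhysFrame::containing_address(p4_start);
    Cr3::write(new_frame, old_flags);                   // takes a PhysFrame, not a raw address
}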

@ -1,7 +1,8 @@
use arch::driver::{acpi::AcpiResult, apic::start_ap};
use memory::*;
use consts::MAX_CPU_NUM;
use core::ptr::{read_volatile, write_volatile};
use x86_64::registers::control_regs::cr3;
use memory::*;
use x86_64::registers::control::Cr3;
const ENTRYOTHER_ADDR: u32 = 0x7000;
@ -16,7 +17,7 @@ pub fn start_other_cores(acpi: &AcpiResult, ms: &mut MemorySet) {
let ms = MemorySet::new(7);
*args = EntryArgs {
kstack: ms.kstack_top() as u64,
page_table: cr3().0 as u32,
page_table: Cr3::read().0.start_address().as_u64() as u32,
stack: args as *const _ as u32, // just enough stack to get us to entry64mp
};
unsafe { MS = Some(ms); }
@ -33,7 +34,6 @@ struct EntryArgs {
stack: u32,
}
use consts::MAX_CPU_NUM;
static mut STARTED: [bool; MAX_CPU_NUM] = [false; MAX_CPU_NUM];
static mut MS: Option<MemorySet> = None;

@ -1,9 +0,0 @@
use arch::interrupt::consts::*;
pub fn switch_to_user() {
unsafe { int!(T_SWITCH_TOU); }
}
pub fn switch_to_kernel() {
unsafe { int!(T_SWITCH_TOK); }
}

@ -88,7 +88,8 @@ pub extern "C" fn rust_main(multiboot_information_address: usize) -> ! {
kernel_memory.push(MemoryArea::new_identity(addr, addr + count * 0x1000, MemoryAttr::default(), "acpi"))
});
arch::smp::start_other_cores(&acpi, &mut kernel_memory);
// FIXME: page fault in SMP
// arch::smp::start_other_cores(&acpi, &mut kernel_memory);
process::init(kernel_memory);
fs::load_sfs();
@ -156,7 +157,7 @@ mod test {
pub fn guard_page() {
use x86_64;
// invoke a breakpoint exception
x86_64::instructions::interrupts::int3();
unsafe { asm!("int 3"::::"intel" "volatile"); }
fn stack_overflow() {
stack_overflow(); // for each recursion, the return address is pushed

@ -1,5 +1,6 @@
use consts::{KERNEL_OFFSET, KERNEL_SIZE};
pub use x86_64::PhysicalAddress as PhysAddr;
pub use x86_64::PhysAddr;
pub type VirtAddr = usize;
pub trait FromToVirtualAddress {
@ -11,17 +12,17 @@ pub trait FromToVirtualAddress {
impl FromToVirtualAddress for PhysAddr {
fn get(&self) -> usize {
self.0 as usize
self.as_u64() as usize
}
fn to_identity_virtual(&self) -> VirtAddr {
self.0 as usize
self.get()
}
fn to_kernel_virtual(&self) -> VirtAddr {
assert!((self.0 as usize) < KERNEL_SIZE);
self.0 as usize + KERNEL_OFFSET
assert!(self.get() < KERNEL_SIZE);
self.get() + KERNEL_OFFSET
}
fn from_kernel_virtual(addr: VirtAddr) -> Self {
assert!(addr >= KERNEL_OFFSET && addr < KERNEL_OFFSET + KERNEL_SIZE);
PhysAddr((addr - KERNEL_OFFSET) as u64)
PhysAddr::new((addr - KERNEL_OFFSET) as u64)
}
}
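
With PhysAddr now re-exported from x86_64, the tuple-field access .0 becomes as_u64()/new(), while the existing trait keeps the usize-based conversions. A tiny usage sketch of the helpers above (hypothetical values, not from the commit):

// Hypothetical round trip through the kernel's higher-half mapping.
let phys = PhysAddr::new(0x1000);
let virt: VirtAddr = phys.to_kernel_virtual();           // 0x1000 + KERNEL_OFFSET
assert_eq!(PhysAddr::from_kernel_virtual(virt).get(), 0x1000);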

@ -13,7 +13,7 @@ impl Frame {
}
//TODO: Set private
pub fn start_address(&self) -> PhysAddr {
PhysAddr((self.number * PAGE_SIZE) as u64)
PhysAddr::new((self.number * PAGE_SIZE) as u64)
}
pub fn clone(&self) -> Frame {

@ -1,6 +1,6 @@
use alloc::vec::Vec;
use core::fmt::{Debug, Error, Formatter};
use super::*;
use core::fmt::{Debug, Formatter, Error};
/// A contiguous region of memory with the same access permissions
/// Corresponds to `vma_struct` in ucore
@ -29,7 +29,7 @@ impl MemoryArea {
MemoryArea {
start_addr,
end_addr,
phys_start_addr: Some(PhysAddr(start_addr as u64)),
phys_start_addr: Some(PhysAddr::new(start_addr as u64)),
flags,
name,
}

@ -138,7 +138,7 @@ fn get_init_kstack_and_set_guard_page() -> Stack {
assert_has_not_been_called!();
extern { fn stack_bottom(); }
let stack_bottom = PhysAddr(stack_bottom as u64).to_kernel_virtual();
let stack_bottom = PhysAddr::new(stack_bottom as u64).to_kernel_virtual();
let stack_bottom_page = Page::of_addr(stack_bottom);
// turn the stack bottom into a guard page

@ -1,12 +1,13 @@
use memory::paging::{Page, PageIter, ActivePageTable, EntryFlags};
use arch::paging::{ActivePageTable, EntryFlags, Page, PageRange};
use memory::PAGE_SIZE;
// TODO: use BitAllocator & alloc fixed size stack
pub struct StackAllocator {
range: PageIter,
range: PageRange,
}
impl StackAllocator {
pub fn new(page_range: PageIter) -> StackAllocator {
pub fn new(page_range: PageRange) -> StackAllocator {
StackAllocator { range: page_range }
}
}
