Update crate `bitflags` to 1.0

master
WangRunji 7 years ago
parent fa2e9866de
commit 7b3b59bf1e

@@ -17,7 +17,7 @@ rlibc = "1.0"
 volatile = "0.1.0"
 spin = "0.4.5"
 multiboot2 = "0.1.0"
-bitflags = "0.7.0"
+bitflags = "1.0"
 x86_64 = "0.1.2"
 once = "0.3.3"
 linked_list_allocator = "0.5.0"
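
Note: bitflags 1.0 is a breaking release, which is why this commit touches every `bitflags!` invocation. Declarations switch from the `flags` keyword to `struct`, the separators between constants switch from commas to semicolons, and the constants become associated constants of the generated type, so bare flag names at call sites must be qualified with the type. A minimal before/after sketch (hypothetical `Flags` type, not from this repo):

    // bitflags 0.7
    bitflags! {
        flags Flags: u32 {
            const A = 1 << 0,
            const B = 1 << 1,
        }
    }
    let both = A | B;

    // bitflags 1.0
    bitflags! {
        struct Flags: u32 {
            const A = 1 << 0;
            const B = 1 << 1;
        }
    }
    let both = Flags::A | Flags::B;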

@@ -12,12 +12,12 @@ use spin::Mutex;
 pub fn init(ioapic_id: u8)
 {
     let mut ioapic = IOAPIC.lock();
-    assert!(ioapic.id() == ioapic_id, "ioapic.init: id isn't equal to ioapicid; not a MP");
+    assert_eq!(ioapic.id(), ioapic_id, "ioapic.init: id isn't equal to ioapicid; not a MP");
     // Mark all interrupts edge-triggered, active high, disabled,
     // and not routed to any CPUs.
     for i in 0..ioapic.maxintr() + 1 {
-        ioapic.write_irq(i, DISABLED, 0);
+        ioapic.write_irq(i, RedirectionEntry::DISABLED, 0);
     }
     debug!("ioapic: init end");
 }
@@ -35,12 +35,12 @@ const REG_TABLE: u8 = 0x10; // Redirection table base
 // CPUs can serve that interrupt.
 bitflags! {
-    flags RedirectionEntry: u32 {
-        const DISABLED = 0x00010000, // Interrupt disabled
-        const LEVEL = 0x00008000, // Level-triggered (vs edge-)
-        const ACTIVELOW = 0x00002000, // Active low (vs high)
-        const LOGICAL = 0x00000800, // Destination is CPU id (vs APIC ID)
-        const NONE = 0x00000000,
+    struct RedirectionEntry: u32 {
+        const DISABLED = 0x00010000; // Interrupt disabled
+        const LEVEL = 0x00008000; // Level-triggered (vs edge-)
+        const ACTIVELOW = 0x00002000; // Active low (vs high)
+        const LOGICAL = 0x00000800; // Destination is CPU id (vs APIC ID)
+        const NONE = 0x00000000;
     }
 }
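
Note: since `DISABLED`, `LEVEL`, etc. are now associated constants of `RedirectionEntry`, every use site below qualifies them with the type. A sketch of how the migrated flags compose (`raw` is a hypothetical register value; `from_bits_truncate` and `contains` are part of the bitflags-generated API):

    let entry = RedirectionEntry::from_bits_truncate(raw);
    if entry.contains(RedirectionEntry::LEVEL) {
        // level-triggered interrupt
    }
    let disabled = entry | RedirectionEntry::DISABLED;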
@@ -85,7 +85,7 @@ impl IoApic {
         // Mark interrupt edge-triggered, active high,
         // enabled, and routed to the given cpunum,
         // which happens to be that cpu's APIC ID.
-        self.write_irq(irq, NONE, cpunum);
+        self.write_irq(irq, RedirectionEntry::NONE, cpunum);
     }
     fn id(&mut self) -> u8 {
         self.read(REG_ID).get_bits(24..28) as u8

@@ -84,7 +84,7 @@ impl<T: Io<Value = u8>> SerialPort<T> {
     }
     pub fn receive(&mut self) {
-        while self.line_sts().contains(INPUT_FULL) {
+        while self.line_sts().contains(LineStsFlags::INPUT_FULL) {
            let data = self.data.read();
            write!(self, "serial receive {}", data);
            // TODO handle received data
@@ -92,7 +92,7 @@ impl<T: Io<Value = u8>> SerialPort<T> {
     }
     fn wait(&self) {
-        while !self.line_sts().contains(OUTPUT_EMPTY) {}
+        while !self.line_sts().contains(LineStsFlags::OUTPUT_EMPTY) {}
     }
     pub fn send(&mut self, data: u8) {
@@ -124,21 +124,21 @@ impl<T: Io<Value = u8>> Write for SerialPort<T> {
 bitflags! {
     /// Interrupt enable flags
-    flags IntEnFlags: u8 {
-        const RECEIVED = 1,
-        const SENT = 1 << 1,
-        const ERRORED = 1 << 2,
-        const STATUS_CHANGE = 1 << 3,
+    struct IntEnFlags: u8 {
+        const RECEIVED = 1 << 0;
+        const SENT = 1 << 1;
+        const ERRORED = 1 << 2;
+        const STATUS_CHANGE = 1 << 3;
         // 4 to 7 are unused
     }
 }
 bitflags! {
     /// Line status flags
-    flags LineStsFlags: u8 {
-        const INPUT_FULL = 1,
+    struct LineStsFlags: u8 {
+        const INPUT_FULL = 1 << 0;
         // 1 to 4 unknown
-        const OUTPUT_EMPTY = 1 << 5,
+        const OUTPUT_EMPTY = 1 << 5;
         // 6 and 7 unknown
     }
 }
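
Note: the serial flags follow the same pattern. A sketch of decoding a raw line-status byte through the generated type (the value 0x20 is hypothetical, chosen to set bit 5):

    let raw: u8 = 0x20; // hypothetical byte read from the line status port
    let sts = LineStsFlags::from_bits_truncate(raw);
    assert!(sts.contains(LineStsFlags::OUTPUT_EMPTY)); // bit 5 set
    assert!(!sts.contains(LineStsFlags::INPUT_FULL));  // bit 0 clear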

@@ -107,18 +107,13 @@ pub enum Descriptor {
 }
 impl Descriptor {
-    pub fn kernel_code_segment() -> Descriptor {
-        let flags = USER_SEGMENT | PRESENT | EXECUTABLE | LONG_MODE;
-        Descriptor::UserSegment(flags.bits())
-    }
     pub fn tss_segment(tss: &'static TaskStateSegment) -> Descriptor {
         use core::mem::size_of;
         use bit_field::BitField;
         let ptr = tss as *const _ as u64;
-        let mut low = PRESENT.bits();
+        let mut low = DescriptorFlags::PRESENT.bits();
         // base
         low.set_bits(16..40, ptr.get_bits(0..24));
         low.set_bits(56..64, ptr.get_bits(24..32));
@@ -136,16 +131,16 @@ impl Descriptor {
 bitflags! {
     /// Reference: https://wiki.osdev.org/GDT
-    flags DescriptorFlags: u64 {
-        const ACCESSED = 1 << 40,
-        const DATA_WRITABLE = 1 << 41,
-        const CODE_READABLE = 1 << 41,
-        const CONFORMING = 1 << 42,
-        const EXECUTABLE = 1 << 43,
-        const USER_SEGMENT = 1 << 44,
-        const USER_MODE = 1 << 45 | 1 << 46,
-        const PRESENT = 1 << 47,
-        const LONG_MODE = 1 << 53,
+    struct DescriptorFlags: u64 {
+        const ACCESSED = 1 << 40;
+        const DATA_WRITABLE = 1 << 41;
+        const CODE_READABLE = 1 << 41;
+        const CONFORMING = 1 << 42;
+        const EXECUTABLE = 1 << 43;
+        const USER_SEGMENT = 1 << 44;
+        const USER_MODE = 1 << 45 | 1 << 46;
+        const PRESENT = 1 << 47;
+        const LONG_MODE = 1 << 53;
     }
 }
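
Note: this diff only shows `kernel_code_segment` being deleted, not where (or whether) it was reintroduced. Had it been kept, its flag expression would need the same qualification under bitflags 1.0; a sketch of that migrated form (not part of this commit):

    pub fn kernel_code_segment() -> Descriptor {
        let flags = DescriptorFlags::USER_SEGMENT | DescriptorFlags::PRESENT
                  | DescriptorFlags::EXECUTABLE | DescriptorFlags::LONG_MODE;
        Descriptor::UserSegment(flags.bits())
    }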

@@ -17,7 +17,7 @@ impl Entry {
     }
     pub fn pointed_frame(&self) -> Option<Frame> {
-        if self.flags().contains(PRESENT) {
+        if self.flags().contains(EntryFlags::PRESENT) {
             Some(Frame::containing_address(
                 self.0 as usize & 0x000fffff_fffff000
             ))
@@ -33,17 +33,17 @@ impl Entry {
 }
 bitflags! {
-    pub flags EntryFlags: u64 {
-        const PRESENT = 1 << 0,
-        const WRITABLE = 1 << 1,
-        const USER_ACCESSIBLE = 1 << 2,
-        const WRITE_THROUGH = 1 << 3,
-        const NO_CACHE = 1 << 4,
-        const ACCESSED = 1 << 5,
-        const DIRTY = 1 << 6,
-        const HUGE_PAGE = 1 << 7,
-        const GLOBAL = 1 << 8,
-        const NO_EXECUTE = 1 << 63,
+    pub struct EntryFlags: u64 {
+        const PRESENT = 1 << 0;
+        const WRITABLE = 1 << 1;
+        const USER_ACCESSIBLE = 1 << 2;
+        const WRITE_THROUGH = 1 << 3;
+        const NO_CACHE = 1 << 4;
+        const ACCESSED = 1 << 5;
+        const DIRTY = 1 << 6;
+        const HUGE_PAGE = 1 << 7;
+        const GLOBAL = 1 << 8;
+        const NO_EXECUTE = 1 << 63;
     }
 }
@@ -57,13 +57,13 @@ impl EntryFlags {
         if section.flags().contains(ELF_SECTION_ALLOCATED) {
             // section is loaded to memory
-            flags = flags | PRESENT;
+            flags = flags | EntryFlags::PRESENT;
         }
         if section.flags().contains(ELF_SECTION_WRITABLE) {
-            flags = flags | WRITABLE;
+            flags = flags | EntryFlags::WRITABLE;
         }
         if !section.flags().contains(ELF_SECTION_EXECUTABLE) {
-            flags = flags | NO_EXECUTE;
+            flags = flags | EntryFlags::NO_EXECUTE;
         }
         flags
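
Note: the commit keeps the `flags = flags | ...` style and only qualifies the constants. bitflags 1.0 also generates an `insert` method, so an equivalent alternative (purely stylistic, not what this commit does) would be:

    let mut flags = EntryFlags::empty();
    flags.insert(EntryFlags::PRESENT);  // same effect as flags = flags | EntryFlags::PRESENT
    flags.insert(EntryFlags::WRITABLE);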

@@ -1,4 +1,4 @@
-use super::{Page, ENTRY_COUNT};
+use super::{Page, ENTRY_COUNT, EntryFlags};
 use super::table::{self, Table, Level4, Level1};
 use memory::*;
 use core::ptr::Unique;
@@ -36,7 +36,7 @@ impl Mapper {
         let p3_entry = &p3[page.p3_index()];
         // 1GiB page?
         if let Some(start_frame) = p3_entry.pointed_frame() {
-            if p3_entry.flags().contains(HUGE_PAGE) {
+            if p3_entry.flags().contains(EntryFlags::HUGE_PAGE) {
                 // address must be 1GiB aligned
                 assert!(start_frame.start_address().get() % (ENTRY_COUNT * ENTRY_COUNT * PAGE_SIZE) == 0);
                 return Some(Frame::containing_address(
@@ -49,7 +49,7 @@ impl Mapper {
         let p2_entry = &p2[page.p2_index()];
         // 2MiB page?
         if let Some(start_frame) = p2_entry.pointed_frame() {
-            if p2_entry.flags().contains(HUGE_PAGE) {
+            if p2_entry.flags().contains(EntryFlags::HUGE_PAGE) {
                 // address must be 2MiB aligned
                 assert!(start_frame.start_address().get() % ENTRY_COUNT == 0);
                 return Some(Frame::containing_address(
@@ -78,7 +78,7 @@ impl Mapper {
         let mut p1 = p2.next_table_create(page.p2_index(), allocator);
         assert!(p1[page.p1_index()].is_unused());
-        p1[page.p1_index()].set(frame, flags | PRESENT);
+        p1[page.p1_index()].set(frame, flags | EntryFlags::PRESENT);
     }
     pub fn map<A>(&mut self, page: Page, flags: EntryFlags, allocator: &mut A)

@@ -120,14 +120,14 @@ impl ActivePageTable {
         let p4_table = temporary_page.map_table_frame(backup.clone(), self);
         // overwrite recursive mapping
-        self.p4_mut()[511].set(table.p4_frame.clone(), PRESENT | WRITABLE);
+        self.p4_mut()[511].set(table.p4_frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
         tlb::flush_all();
         // execute f in the new context
         f(self);
         // restore recursive mapping to original p4 table
-        p4_table[511].set(backup, PRESENT | WRITABLE);
+        p4_table[511].set(backup, EntryFlags::PRESENT | EntryFlags::WRITABLE);
         tlb::flush_all();
     }
@@ -167,7 +167,7 @@ impl InactivePageTable {
             // now we are able to zero the table
             table.zero();
             // set up recursive mapping for the table
-            table[511].set(frame.clone(), PRESENT | WRITABLE);
+            table[511].set(frame.clone(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
         }
         temporary_page.unmap(active_table);

@@ -22,7 +22,7 @@ impl<L> Table<L> where L: TableLevel {
 impl<L> Table<L> where L: HierarchicalLevel {
     fn next_table_address(&self, index: usize) -> Option<usize> {
         let entry_flags = self[index].flags();
-        if entry_flags.contains(PRESENT) && !entry_flags.contains(HUGE_PAGE) {
+        if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.contains(EntryFlags::HUGE_PAGE) {
             let table_address = self as *const _ as usize;
             Some((table_address << 9) | (index << 12))
         } else {
@@ -47,10 +47,10 @@ impl<L> Table<L> where L: HierarchicalLevel {
         where A: FrameAllocator
     {
         if self.next_table(index).is_none() {
-            assert!(!self.entries[index].flags().contains(HUGE_PAGE),
+            assert!(!self.entries[index].flags().contains(EntryFlags::HUGE_PAGE),
                     "mapping code does not support huge pages");
             let frame = allocator.allocate_frame().expect("no frames available");
-            self.entries[index].set(frame, PRESENT | WRITABLE);
+            self.entries[index].set(frame, EntryFlags::PRESENT | EntryFlags::WRITABLE);
             self.next_table_mut(index).unwrap().zero();
         }
         self.next_table_mut(index).unwrap()

@@ -22,11 +22,11 @@ impl TemporaryPage {
     pub fn map(&mut self, frame: Frame, active_table: &mut ActivePageTable)
         -> VirtualAddress
     {
-        use super::entry::WRITABLE;
+        use super::entry::EntryFlags;
         assert!(active_table.translate_page(self.page).is_none(),
                 "temporary page is already mapped");
-        active_table.map_to(self.page, frame, WRITABLE, &mut self.allocator);
+        active_table.map_to(self.page, frame, EntryFlags::WRITABLE, &mut self.allocator);
         self.page.start_address()
     }
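
Note: this hunk shows why imports churn throughout the commit: an associated constant cannot be named in a `use` declaration, so `use super::entry::WRITABLE` has no bitflags 1.0 equivalent. The pattern everywhere is to import the type and path-qualify the flag:

    use super::entry::EntryFlags;      // import the type...
    let flags = EntryFlags::WRITABLE;  // ...and qualify the constant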

@@ -7,7 +7,7 @@ pub use self::frame::*;
 use multiboot2::BootInformation;
 use consts::KERNEL_OFFSET;
 use arch::paging;
+use arch::paging::EntryFlags;
 mod area_frame_allocator;
 pub mod heap_allocator;
 mod stack_allocator;
@@ -55,7 +55,7 @@ pub fn init(boot_info: &BootInformation) -> MemoryController {
     let heap_end_page = Page::containing_address(KERNEL_HEAP_OFFSET + KERNEL_HEAP_SIZE-1);
     for page in Page::range_inclusive(heap_start_page, heap_end_page) {
-        active_table.map(page, paging::WRITABLE, &mut frame_allocator);
+        active_table.map(page, EntryFlags::WRITABLE, &mut frame_allocator);
     }
     let stack_allocator = {
@@ -119,13 +119,13 @@ pub fn remap_the_kernel<A>(allocator: &mut A, boot_info: &BootInformation)
         // identity map the VGA text buffer
         let vga_buffer_frame = Frame::containing_address(0xb8000);
-        mapper.identity_map(vga_buffer_frame, WRITABLE, allocator);
+        mapper.identity_map(vga_buffer_frame, EntryFlags::WRITABLE, allocator);
         // identity map the multiboot info structure
         let multiboot_start = Frame::containing_address(boot_info.start_address());
         let multiboot_end = Frame::containing_address(boot_info.end_address() - 1);
         for frame in Frame::range_inclusive(multiboot_start, multiboot_end) {
-            mapper.identity_map(frame, PRESENT, allocator);
+            mapper.identity_map(frame, EntryFlags::PRESENT, allocator);
         }
     });
@@ -160,9 +160,8 @@ impl MemoryController {
                        size_in_pages)
     }
     pub fn map_page_identity(&mut self, addr: usize) {
-        use self::paging::{WRITABLE};
         let frame = Frame::containing_address(addr);
-        let flags = WRITABLE;
+        let flags = EntryFlags::WRITABLE;
         self.active_table.identity_map(frame, flags, &mut self.frame_allocator);
     }
     pub fn print_page_table(&self) {

@@ -1,4 +1,4 @@
-use memory::paging::{self, Page, PageIter, ActivePageTable};
+use memory::paging::{Page, PageIter, ActivePageTable, EntryFlags};
 use memory::{PAGE_SIZE, FrameAllocator};
 pub struct StackAllocator {
@@ -42,7 +42,7 @@ impl StackAllocator {
         // map stack pages to physical frames
         for page in Page::range_inclusive(start, end) {
-            active_table.map(page, paging::WRITABLE, frame_allocator);
+            active_table.map(page, EntryFlags::WRITABLE, frame_allocator);
         }
         // create a new stack
