Fix some warnings. Optimize imports.

master
WangRunji 7 years ago
parent 7b57e64ff6
commit 316d32496c

@@ -1,8 +1,8 @@
 // Copy from Redox
 use core::fmt::{self, Write};
+use redox_syscall::io::{Io, Mmio, Pio, ReadOnly};
 use spin::Mutex;
-use redox_syscall::io::{Io, Pio, Mmio, ReadOnly};
 
 pub static COM1: Mutex<Serial> = Mutex::new(Serial::new(0x3F8));
 pub static COM2: Mutex<Serial> = Mutex::new(Serial::new(0x2F8));
@@ -85,7 +85,7 @@ impl<T: Io<Value = u8>> SerialPort<T> {
     pub fn receive(&mut self) {
         while self.line_sts().contains(LineStsFlags::INPUT_FULL) {
             let data = self.data.read();
-            write!(self, "serial receive {}", data);
+            write!(self, "serial receive {}", data).unwrap();
             // TODO handle received data
         }
     }
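The new `.unwrap()` is what clears the `unused_must_use` warning here: `write!` expands to a call to `write_fmt`, which returns a `fmt::Result` that must not be silently dropped. A minimal sketch of the pattern (the `echo` helper is illustrative, not kernel code):

    use core::fmt::Write;

    // `write!` returns `fmt::Result`; dropping it triggers `unused_must_use`.
    // Unwrapping (or otherwise handling) the result silences the warning.
    fn echo<W: Write>(out: &mut W, data: u8) {
        write!(out, "serial receive {}", data).unwrap();
    }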

@@ -1,24 +1,23 @@
-use core::fmt;
-use core::fmt::Debug;
-use x86_64::structures::tss::TaskStateSegment;
-use x86_64::structures::gdt::SegmentSelector;
-use x86_64::{PrivilegeLevel, VirtualAddress};
-use spin::{Once, Mutex, MutexGuard};
 use alloc::boxed::Box;
 use arch::driver::apic::lapic_id;
 use consts::MAX_CPU_NUM;
+use core::fmt;
+use core::fmt::Debug;
+use spin::{Mutex, MutexGuard, Once};
+use x86_64::{PrivilegeLevel, VirtualAddress};
+use x86_64::structures::gdt::SegmentSelector;
+use x86_64::structures::tss::TaskStateSegment;
 
 /// Alloc TSS & GDT at kernel heap, then init and load it.
 /// The double fault stack will be allocated at kernel heap too.
 pub fn init() {
-    use x86_64::structures::gdt::SegmentSelector;
     use x86_64::instructions::segmentation::set_cs;
     use x86_64::instructions::tables::load_tss;
 
     let double_fault_stack_top = Box::into_raw(Box::new([0u8; 4096])) as usize + 4096;
     debug!("Double fault stack top @ {:#x}", double_fault_stack_top);
 
-    let mut tss = Box::new({
+    let tss = Box::new({
         let mut tss = TaskStateSegment::new();
         // Address of the stack to switch to automatically on a Double Fault
@@ -75,9 +74,7 @@ impl Cpu {
     /// Must be called before every entry to user mode, to guarantee a correct return to kernel mode
     pub fn set_ring0_rsp(&mut self, rsp: usize) {
         trace!("gdt.set_ring0_rsp: {:#x}", rsp);
-        unsafe {
-            self.tss.privilege_stack_table[0] = VirtualAddress(rsp);
-        }
+        self.tss.privilege_stack_table[0] = VirtualAddress(rsp);
     }
 }

@@ -1,4 +1,3 @@
-use alloc::boxed::Box;
 use x86_64::structures::idt::*;
 
 pub fn init() {

@@ -64,6 +64,7 @@
 //! deactivate CPU
 //! ```
 
+use super::consts::*;
 use super::TrapFrame;
 
 #[no_mangle]
@@ -74,7 +75,7 @@ pub extern fn rust_trap(tf: &mut TrapFrame) {
         T_BRKPT => breakpoint(),
         T_DBLFLT => double_fault(tf),
         T_PGFLT => page_fault(tf),
-        T_IRQ0...64 => {
+        T_IRQ0...63 => {
             let irq = tf.trap_num as u8 - T_IRQ0;
             match irq {
                 IRQ_TIMER => timer(),
@@ -123,8 +124,6 @@ fn page_fault(tf: &mut TrapFrame) {
     error(tf);
 }
 
-use super::consts::*;
-
 fn keyboard() {
     use arch::driver::keyboard;
     info!("\nInterupt: Keyboard");
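The `T_IRQ0...64` to `T_IRQ0...63` change is an off-by-one fix in an inclusive range pattern (`...`, written `..=` in current Rust, matches both endpoints). Assuming `T_IRQ0` is 32, as in the usual PIC/APIC remapping, the arm now covers exactly the 32 vectors reserved for external IRQs instead of also catching vector 64. Sketch under that assumption:

    // Assumption: T_IRQ0 = 32 (the conventional remap base).
    const T_IRQ0: u8 = 32;

    fn is_external_irq(vector: u8) -> bool {
        match vector {
            // Inclusive range: matches 32 through 63, i.e. 32 vectors.
            T_IRQ0..=63 => true,
            _ => false,
        }
    }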

@@ -34,10 +34,10 @@
 //!
 //! * To maintain the reference counts, keep a global map `RC_MAP`: Frame -> (read_count, write_count)
 
-use super::*;
-use alloc::rc::Rc;
 use alloc::BTreeMap;
-use spin::{Once, Mutex};
+pub use self::test::test_cow;
+use spin::Mutex;
+use super::*;
 use x86_64::instructions::tlb;
 use x86_64::VirtualAddress;
@@ -163,8 +163,6 @@ impl FrameRcMap {
     }
 }
 
-pub use self::test::test_cow;
-
 mod test {
     use super::*;

@@ -49,13 +49,3 @@ bitflags! {
         const COW = 2 << 9;
     }
 }
-
-use core::fmt;
-use core::fmt::Debug;
-
-impl Debug for Entry {
-    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        write!(f, "{:#014X} {:?}", self.0 & 0x000fffff_fffff000, self.flags());
-        Ok(())
-    }
-}

@@ -1,7 +1,7 @@
-use super::{Page, ENTRY_COUNT, EntryFlags};
-use super::table::{self, Table, Level4, Level1};
-use memory::*;
 use core::ptr::Unique;
+use memory::*;
+use super::{ENTRY_COUNT, EntryFlags, Page};
+use super::table::{self, Level1, Level4, Table};
 
 pub struct Mapper {
     p4: Unique<Table<Level4>>,
@@ -71,9 +71,9 @@ impl Mapper {
     pub(super) fn entry_mut(&mut self, page: Page) -> &mut Entry {
         use core::ops::IndexMut;
         let p4 = self.p4_mut();
-        let mut p3 = p4.next_table_create(page.p4_index());
-        let mut p2 = p3.next_table_create(page.p3_index());
-        let mut p1 = p2.next_table_create(page.p2_index());
+        let p3 = p4.next_table_create(page.p4_index());
+        let p2 = p3.next_table_create(page.p3_index());
+        let p1 = p2.next_table_create(page.p2_index());
         p1.index_mut(page.p1_index())
     }
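Dropping `mut` from the `p3`/`p2`/`p1` bindings fixes `unused_mut` warnings: each binding holds a `&mut` reference that is never reassigned, and mutating through a `&mut T` does not require the binding itself to be `mut`. Illustrative sketch (not kernel code):

    fn bump_first(v: &mut Vec<i32>) {
        // `first` holds a `&mut i32`; writing through it needs no `let mut`,
        // which would only trigger `unused_mut`.
        let first = v.first_mut().unwrap();
        *first += 1;
    }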

@@ -1,9 +1,9 @@
+use core::ops::{Add, Deref, DerefMut};
+use memory::*;
+pub use self::cow::*;
 pub use self::entry::*;
 pub use self::mapper::Mapper;
-use core::ops::{Deref, DerefMut, Add};
-use memory::*;
 pub use self::temporary_page::TemporaryPage;
-pub use self::cow::*;
 
 mod entry;
 mod table;
@@ -118,7 +118,7 @@ impl ActivePageTable {
         use x86_64::instructions::tlb;
         use x86_64::registers::control_regs;
 
-        let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe));
+        let temporary_page = TemporaryPage::new();
         {
             let backup = Frame::of_addr(
                 control_regs::cr3().0 as usize);
@@ -142,7 +142,6 @@ impl ActivePageTable {
     }
 
     pub fn switch(&mut self, new_table: InactivePageTable) -> InactivePageTable {
-        use x86_64::PhysicalAddress;
         use x86_64::registers::control_regs;
         debug!("switch table {:?} -> {:?}", Frame::of_addr(control_regs::cr3().0 as usize), new_table.p4_frame);
         if new_table.p4_frame.start_address() == control_regs::cr3() {
@@ -168,7 +167,7 @@ pub struct InactivePageTable {
 
 impl InactivePageTable {
     pub fn new(frame: Frame, active_table: &mut ActivePageTable) -> InactivePageTable {
-        let mut temporary_page = TemporaryPage::new(Page::of_addr(0xcafebabe));
+        let temporary_page = TemporaryPage::new();
         {
             let table = temporary_page.map_table_frame(frame.clone(),
                 active_table);

@ -1,21 +1,18 @@
use super::{Page, ActivePageTable}; use super::*;
use super::table::{Table, Level1}; use super::table::{Level1, Table};
use memory::{Frame, FrameAllocator, VirtAddr};
pub struct TemporaryPage { pub struct TemporaryPage {
page: Page, page: Page,
} }
impl TemporaryPage { impl TemporaryPage {
pub fn new(page: Page) -> TemporaryPage { pub fn new() -> TemporaryPage {
TemporaryPage { page } TemporaryPage { page: Page::of_addr(0xcafebabe) }
} }
/// Maps the temporary page to the given frame in the active table. /// Maps the temporary page to the given frame in the active table.
/// Returns the start address of the temporary page. /// Returns the start address of the temporary page.
pub fn map(&self, frame: Frame, active_table: &mut ActivePageTable) pub fn map(&self, frame: Frame, active_table: &mut ActivePageTable) -> VirtAddr {
-> VirtAddr
{
use super::entry::EntryFlags; use super::entry::EntryFlags;
assert!(active_table.translate_page(self.page).is_none(), assert!(active_table.translate_page(self.page).is_none(),
@ -31,10 +28,7 @@ impl TemporaryPage {
/// Maps the temporary page to the given page table frame in the active /// Maps the temporary page to the given page table frame in the active
/// table. Returns a reference to the now mapped table. /// table. Returns a reference to the now mapped table.
pub fn map_table_frame(&self, pub fn map_table_frame(&self, frame: Frame, active_table: &mut ActivePageTable) -> &mut Table<Level1> {
frame: Frame,
active_table: &mut ActivePageTable)
-> &mut Table<Level1> {
unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) } unsafe { &mut *(self.map(frame, active_table) as *mut Table<Level1>) }
} }
} }

@@ -1,5 +1,8 @@
-use core::fmt;
 use arch::driver::serial::COM1;
+use arch::driver::vga::Color;
+use core::fmt;
+use log;
+use log::{Level, LevelFilter, Log, Metadata, Record};
 
 mod vga_writer;
@@ -35,11 +38,9 @@ macro_rules! with_color {
     }};
 }
 
-use arch::driver::vga::Color;
-
 fn print_in_color(args: fmt::Arguments, color: Color) {
     use core::fmt::Write;
-    use arch::driver::vga::*;
+//    use arch::driver::vga::*;
 //    {
 //        let mut writer = vga_writer::VGA_WRITER.lock();
 //        writer.set_color(color);
@@ -57,9 +58,6 @@ pub fn print(args: fmt::Arguments) {
     COM1.lock().write_fmt(args).unwrap();
 }
 
-use log;
-use log::{Record, Level, Metadata, Log, SetLoggerError, LevelFilter};
-
 struct SimpleLogger;
 
 impl Log for SimpleLogger {

@@ -17,7 +17,6 @@ macro_rules! test {
     ($func:ident) => (
         if cfg!(feature = "test") {
            println!("Testing: {}", stringify!($func));
-            use self::test::$func;
            test::$func();
            println!("Success: {}", stringify!($func));
        }
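The removed `use self::test::$func;` was dead code: the call right below it is already written with the qualified path `test::$func()`, so the local import only produced an `unused_imports` warning. The same situation outside the macro (names are illustrative):

    mod test {
        pub fn my_case() {}
    }

    fn run_tests() {
        // use self::test::my_case;  // unused once the call below is qualified
        test::my_case();
    }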

@@ -1,5 +1,4 @@
 use super::address::PhysAddr;
-use memory::FRAME_ALLOCATOR;
 
 pub const PAGE_SIZE: usize = 4096;
@@ -20,34 +19,8 @@ impl Frame {
     pub fn clone(&self) -> Frame {
         Frame { number: self.number }
     }
-    //TODO: Set private
-    pub fn range_inclusive(start: Frame, end: Frame) -> FrameIter {
-        FrameIter {
-            start,
-            end,
-        }
-    }
 }
-
-pub struct FrameIter {
-    start: Frame,
-    end: Frame,
-}
-
-impl Iterator for FrameIter {
-    type Item = Frame;
-
-    fn next(&mut self) -> Option<Frame> {
-        if self.start <= self.end {
-            let frame = self.start.clone();
-            self.start.number += 1;
-            Some(frame)
-        } else {
-            None
-        }
-    }
-}
 
 pub trait FrameAllocator {
     fn allocate_frame(&mut self) -> Option<Frame>;
     fn deallocate_frame(&mut self, frame: Frame);

@@ -1,62 +0,0 @@
-use alloc::heap::{Alloc, AllocErr, Layout};
-use core::sync::atomic::{AtomicUsize, Ordering};
-
-/// A simple allocator that allocates memory linearly and ignores freed memory.
-#[derive(Debug)]
-pub struct BumpAllocator {
-    heap_start: usize,
-    heap_end: usize,
-    next: AtomicUsize,
-}
-
-impl BumpAllocator {
-    pub const fn new(heap_start: usize, heap_end: usize) -> Self {
-        Self { heap_start, heap_end, next: AtomicUsize::new(heap_start) }
-    }
-}
-
-unsafe impl<'a> Alloc for &'a BumpAllocator {
-    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
-        loop {
-            // load current state of the `next` field
-            let current_next = self.next.load(Ordering::Relaxed);
-            let alloc_start = align_up(current_next, layout.align());
-            let alloc_end = alloc_start.saturating_add(layout.size());
-
-            if alloc_end <= self.heap_end {
-                // update the `next` pointer if it still has the value `current_next`
-                let next_now = self.next.compare_and_swap(current_next, alloc_end,
-                    Ordering::Relaxed);
-                if next_now == current_next {
-                    // next address was successfully updated, allocation succeeded
-                    return Ok(alloc_start as *mut u8);
-                }
-            } else {
-                return Err(AllocErr::Exhausted{ request: layout })
-            }
-        }
-    }
-
-    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
-        // do nothing, leak memory
-    }
-}
-
-/// Align downwards. Returns the greatest x with alignment `align`
-/// so that x <= addr. The alignment must be a power of 2.
-pub fn align_down(addr: usize, align: usize) -> usize {
-    if align.is_power_of_two() {
-        addr & !(align - 1)
-    } else if align == 0 {
-        addr
-    } else {
-        panic!("`align` must be a power of 2");
-    }
-}
-
-/// Align upwards. Returns the smallest x with alignment `align`
-/// so that x >= addr. The alignment must be a power of 2.
-pub fn align_up(addr: usize, align: usize) -> usize {
-    align_down(addr + align - 1, align)
-}

@@ -1,17 +1,17 @@
 pub use arch::paging::*;
-pub use self::stack_allocator::*;
+use arch::paging;
+use bit_allocator::{BitAlloc, BitAlloc64K};
+use consts::KERNEL_OFFSET;
+use multiboot2::{ElfSection, ElfSectionFlags, ElfSectionsTag};
+use multiboot2::BootInformation;
 pub use self::address::*;
 pub use self::frame::*;
 pub use self::memory_set::*;
+pub use self::stack_allocator::*;
-use multiboot2::BootInformation;
-use consts::KERNEL_OFFSET;
-use arch::paging;
 use spin::{Mutex, MutexGuard};
 use super::HEAP_ALLOCATOR;
 
 mod memory_set;
-pub mod heap_allocator;
 mod stack_allocator;
 mod address;
 mod frame;
@@ -74,8 +74,6 @@ pub fn init(boot_info: BootInformation) -> MemorySet {
     kernel_memory
 }
 
-use bit_allocator::{BitAlloc64K, BitAlloc};
-
 impl FrameAllocator for BitAlloc64K {
     fn allocate_frame(&mut self) -> Option<Frame> {
         self.alloc().map(|x| Frame { number: x })
@@ -150,8 +148,6 @@ fn get_init_kstack_and_set_guard_page() -> Stack {
     Stack::new(stack_bottom + 8 * PAGE_SIZE, stack_bottom + 1 * PAGE_SIZE)
 }
 
-use multiboot2::{ElfSectionsTag, ElfSection, ElfSectionFlags};
-
 impl From<ElfSectionsTag> for MemorySet {
     fn from(sections: ElfSectionsTag) -> Self {
         assert_has_not_been_called!();
@@ -167,7 +163,6 @@ impl From<ElfSectionsTag> for MemorySet {
 impl From<ElfSection> for MemoryArea {
     fn from(section: ElfSection) -> Self {
-        use self::address::FromToVirtualAddress;
         let mut start_addr = section.start_address() as usize;
         let mut end_addr = section.end_address() as usize;
         assert_eq!(start_addr % PAGE_SIZE, 0, "sections need to be page aligned");

@ -1,18 +1,16 @@
use memory::MemorySet;
use spin::Once;
use sync::SpinNoIrqLock;
use core::slice;
use alloc::String; use alloc::String;
use memory::MemorySet;
use self::process::*; use self::process::*;
pub use self::processor::*; pub use self::processor::*;
use spin::Once;
use sync::SpinNoIrqLock;
mod process; mod process;
mod processor; mod processor;
mod scheduler; mod scheduler;
pub fn init(mut ms: MemorySet) { pub fn init(ms: MemorySet) {
PROCESSOR.call_once(|| { PROCESSOR.call_once(|| {
SpinNoIrqLock::new({ SpinNoIrqLock::new({
let initproc = Process::new_init(ms); let initproc = Process::new_init(ms);
@ -26,7 +24,7 @@ pub fn init(mut ms: MemorySet) {
pub static PROCESSOR: Once<SpinNoIrqLock<Processor>> = Once::new(); pub static PROCESSOR: Once<SpinNoIrqLock<Processor>> = Once::new();
extern fn idle_thread(arg: usize) -> ! { extern fn idle_thread(_arg: usize) -> ! {
loop {} loop {}
} }
@ -39,7 +37,7 @@ pub fn add_user_process(name: impl AsRef<str>, data: &[u8]) {
pub fn add_kernel_process(entry: extern fn(usize) -> !, arg: usize) -> Pid { pub fn add_kernel_process(entry: extern fn(usize) -> !, arg: usize) -> Pid {
let mut processor = PROCESSOR.try().unwrap().lock(); let mut processor = PROCESSOR.try().unwrap().lock();
let mut new = Process::new("", entry, arg); let new = Process::new("", entry, arg);
processor.add(new) processor.add(new)
} }
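Renaming `arg` to `_arg` in `idle_thread` keeps the required `extern fn(usize) -> !` signature while telling the compiler the parameter is intentionally unused, which silences `unused_variables`. Minimal sketch:

    // The leading underscore marks the parameter as intentionally unused;
    // the function type is unchanged.
    extern "C" fn idle(_arg: usize) -> ! {
        loop {}
    }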

@ -1,9 +1,7 @@
use super::*; use alloc::String;
use memory::{self, Stack, InactivePageTable, MemoryAttr};
use xmas_elf::{ElfFile, program::{Flags, ProgramHeader}, header::HeaderPt2};
use core::slice;
use alloc::{rc::Rc, String};
use arch::interrupt::*; use arch::interrupt::*;
use memory::{MemoryArea, MemoryAttr, MemorySet};
use xmas_elf::{ElfFile, header::HeaderPt2, program::{Flags, ProgramHeader}};
#[derive(Debug)] #[derive(Debug)]
pub struct Process { pub struct Process {
@ -66,7 +64,6 @@ impl Process {
/// uCore x86 32bit program is planned to be supported. /// uCore x86 32bit program is planned to be supported.
pub fn new_user(data: &[u8]) -> Self { pub fn new_user(data: &[u8]) -> Self {
// Parse elf // Parse elf
let begin = data.as_ptr() as usize;
let elf = ElfFile::new(data).expect("failed to read elf"); let elf = ElfFile::new(data).expect("failed to read elf");
let is32 = match elf.header.pt2 { let is32 = match elf.header.pt2 {
HeaderPt2::Header32(_) => true, HeaderPt2::Header32(_) => true,
@ -97,6 +94,7 @@ impl Process {
ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize), ProgramHeader::Ph32(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize), ProgramHeader::Ph64(ph) => (ph.virtual_addr as usize, ph.offset as usize, ph.file_size as usize),
}; };
use core::slice;
let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) }; let target = unsafe { slice::from_raw_parts_mut(virt_addr as *mut u8, file_size) };
target.copy_from_slice(&data[offset..offset + file_size]); target.copy_from_slice(&data[offset..offset + file_size]);
} }
@ -168,8 +166,6 @@ impl Process {
} }
} }
use memory::{MemorySet, MemoryArea, PhysAddr, FromToVirtualAddress, EntryFlags};
impl<'a> From<&'a ElfFile<'a>> for MemorySet { impl<'a> From<&'a ElfFile<'a>> for MemorySet {
fn from(elf: &'a ElfFile<'a>) -> Self { fn from(elf: &'a ElfFile<'a>) -> Self {
let mut set = MemorySet::new(7); let mut set = MemorySet::new(7);

@@ -1,11 +1,8 @@
 use alloc::BTreeMap;
-use memory::{ActivePageTable, InactivePageTable};
+use core::fmt::{Debug, Error, Formatter};
 use super::process::*;
 use super::scheduler::*;
-use core::cell::RefCell;
-use core::fmt::{Debug, Formatter, Error};
 use util::{EventHub, GetMut2};
-use arch::interrupt::*;
 
 pub struct Processor {
     procs: BTreeMap<Pid, Process>,

@@ -26,10 +26,12 @@
 //! `MutexSupport` provides several hooks that are called at different points while operating on the lock.
 //! Note that this interface is really the union of what several implementations need, so it is not very general.
 
-use core::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
+use arch::interrupt;
 use core::cell::UnsafeCell;
-use core::ops::{Deref, DerefMut};
 use core::fmt;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{ATOMIC_BOOL_INIT, AtomicBool, Ordering};
+use super::Condvar;
 
 pub type SpinLock<T> = Mutex<T, Spin>;
 pub type SpinNoIrqLock<T> = Mutex<T, SpinNoIrq>;
@@ -89,7 +91,7 @@ impl<T, S: MutexSupport> Mutex<T, S>
         // We know statically that there are no outstanding references to
         // `self` so there's no need to lock.
         let Mutex { data, .. } = self;
-        unsafe { data.into_inner() }
+        data.into_inner()
     }
 }
@@ -222,8 +224,6 @@ impl MutexSupport for Spin {
     fn after_unlock(&self) {}
 }
 
-use arch::interrupt;
-
 /// Spin & no-interrupt lock
 #[derive(Debug)]
 pub struct SpinNoIrq;
@@ -251,10 +251,6 @@ impl MutexSupport for SpinNoIrq {
     fn after_unlock(&self) {}
 }
 
-use thread;
-use alloc::VecDeque;
-use super::Condvar;
-
 impl MutexSupport for Condvar {
     type GuardData = ();
     fn new() -> Self {
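Removing the `unsafe` block around `data.into_inner()` is another `unused_unsafe` fix: `UnsafeCell::into_inner` takes the cell by value and is a safe method, so no unsafe context is needed. Sketch:

    use core::cell::UnsafeCell;

    // Ownership guarantees there are no outstanding references,
    // so the extraction is safe and needs no `unsafe` block.
    fn unwrap_cell<T>(cell: UnsafeCell<T>) -> T {
        cell.into_inner()
    }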

@@ -1,5 +1,7 @@
 //! System call parsing and execution module
 
+#![allow(unused)]
+
 use arch::interrupt::TrapFrame;
 use process::*;
 use thread;

@@ -1,3 +1,6 @@
+use core::fmt::Debug;
+pub use self::event::EventHub;
+
 pub fn bytes_sum<T>(p: &T) -> u8 {
     use core::mem::size_of_val;
     let len = size_of_val(p);
@@ -26,9 +29,6 @@ pub unsafe fn from_cstr(s: *const u8) -> &'static str {
     str::from_utf8(slice::from_raw_parts(s, len)).unwrap()
 }
 
-use core::ops::IndexMut;
-use core::fmt::Debug;
-
 /// Get values by 2 diff keys at the same time
 pub trait GetMut2<Idx: Debug + Eq> {
     type Output;
@@ -44,8 +44,6 @@ pub trait GetMut2<Idx: Debug + Eq> {
     }
 }
 
-pub use self::event::EventHub;
-
 mod event {
     use alloc::BinaryHeap;
     use core::cmp::{Ordering, PartialOrd};
