Merge branch 'dev' into mipsel

master
Harry Chen 6 years ago
commit 9203a612f8

@ -11,30 +11,19 @@ pub struct Page {
}
impl Page {
/*
** @brief get the virtual address of beginning of the page
** @retval VirtAddr the virtual address of beginning of the page
*/
/// Returns the start address of the page.
pub fn start_address(&self) -> VirtAddr {
self.number * PAGE_SIZE
}
/*
** @brief get the page of a given virtual address
** @param addr: VirtAddr the given virtual address
** @retval Page the page of the given virtual address
*/
/// Returns the page that contains the given virtual address.
pub fn of_addr(addr: VirtAddr) -> Self {
Page {
number: addr / PAGE_SIZE,
}
}
/*
** @brief get a pageRange between two virtual address
** @param begin: VirtAddr the virtual address of the beginning
** @param end: VirtAddr the virtual address of the end
** @retval PageRange the page of the given virtual address
*/
/// Returns the range of pages between addresses `begin` and `end`.
pub fn range_of(begin: VirtAddr, end: VirtAddr) -> PageRange {
PageRange {
start: Page::of_addr(begin),
@ -79,45 +68,3 @@ impl Iterator for PageRange {
}
}
}
/// frame for the swapmanager
#[derive(Debug, Copy, Clone, PartialOrd, Ord)]
#[repr(C)]
pub struct Frame {
/// the raw pointer for the frame's memory set's inactive page table
page_table: usize,
/// the virtual addr for the frame
virtaddr: VirtAddr,
/// the token for frame
token: usize,
}
impl Frame {
pub fn get_page_table(&self) -> usize {
self.page_table
}
pub fn get_virtaddr(&self) -> VirtAddr {
self.virtaddr
}
pub fn get_token(&self) -> usize {
self.token
}
pub fn new(pt: usize, addr: VirtAddr, pttoken: usize) -> Self {
Frame {
page_table: pt,
virtaddr: addr,
token: pttoken,
}
}
}
impl PartialEq for Frame {
fn eq(&self, other: &Frame) -> bool {
self.token == other.token && self.virtaddr == other.virtaddr
}
}
impl Eq for Frame {}
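Taken together, the surviving `Page` helpers at the top of this file form a small address-arithmetic API. A usage sketch, assuming the crate's usual 4 KiB `PAGE_SIZE` (the addresses are made up for illustration):

// Sketch only: exercises start_address / of_addr / range_of as documented above.
fn page_api_demo() {
    let page = Page::of_addr(0x8000_1234);
    assert_eq!(page.start_address(), 0x8000_1000); // rounded down to a page boundary

    // Visit every page covering [0x8000_0000, 0x8000_3000).
    for p in Page::range_of(0x8000_0000, 0x8000_3000) {
        let _start = p.start_address();
    }
}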

@ -11,7 +11,7 @@ pub mod cow;
pub mod memory_set;
pub mod no_mmu;
pub mod paging;
pub mod swap;
//pub mod swap;
pub use crate::addr::*;

@ -25,15 +25,13 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
self.map(pt, addr, attr);
pt.get_page_slice_mut(addr).copy_from_slice(&data);
});
self.map(pt, addr, attr);
let data = src_pt.get_page_slice_mut(addr);
pt.get_page_slice_mut(addr).copy_from_slice(data);
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

@ -30,24 +30,21 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
let entry = pt.get_entry(addr).expect("failed to get entry");
let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() {
// eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
let target = self.allocator.alloc().expect("failed to alloc frame");
let target_data = pt.get_page_slice_mut(addr);
let entry = pt.map(addr, target);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
let data = src_pt.get_page_slice_mut(addr);
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
attr.apply(entry);
pt.get_page_slice_mut(addr).copy_from_slice(data);
} else {
// delay map
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
}

@ -39,24 +39,21 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: usize,
attr: &MemoryAttr,
) {
let entry = pt.get_entry(addr).expect("failed to get entry");
let entry = src_pt.get_entry(addr).expect("failed to get entry");
if entry.present() && !attr.readonly {
// eager map and copy data
let data = Vec::from(pt.get_page_slice_mut(addr));
with(&mut || {
let target = self.allocator.alloc().expect("failed to alloc frame");
let target_data = pt.get_page_slice_mut(addr);
let entry = pt.map(addr, target);
target_data.copy_from_slice(&data);
attr.apply(entry);
});
let data = src_pt.get_page_slice_mut(addr);
let target = self.allocator.alloc().expect("failed to alloc frame");
let entry = pt.map(addr, target);
attr.apply(entry);
pt.get_page_slice_mut(addr).copy_from_slice(data);
} else {
// delay map
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
}
@ -69,16 +66,9 @@ impl<F: Read, T: FrameAllocator> MemoryHandler for File<F, T> {
let frame = self.allocator.alloc().expect("failed to alloc frame");
entry.set_target(frame);
entry.set_present(true);
let writable = entry.writable();
entry.set_writable(true);
entry.update();
self.fill_data(pt, addr);
let entry = pt.get_entry(addr).expect("failed to get entry");
entry.set_writable(writable);
entry.update();
true
}
}

@ -23,11 +23,11 @@ impl MemoryHandler for Linear {
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
_src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
) {
with(&mut || self.map(pt, addr, attr));
self.map(pt, addr, attr);
}
fn handle_page_fault(&self, _pt: &mut PageTable, _addr: VirtAddr) -> bool {

@ -5,20 +5,17 @@ pub trait MemoryHandler: Debug + Send + Sync + 'static {
fn box_clone(&self) -> Box<MemoryHandler>;
/// Map `addr` in the page table
/// Should set page flags here instead of in page_fault_handler
/// Should set page flags here instead of in `page_fault_handler`
fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr);
/// Unmap `addr` in the page table
fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
/// Clone map `addr` from one page table to another.
/// `pt` is the current active page table.
/// `with` is the `InactivePageTable::with` function.
/// Call `with` then use `pt` as target page table inside.
/// Clone map `addr` from page table `src_pt` to `pt`.
fn clone_map(
&self,
pt: &mut PageTable,
with: &Fn(&mut FnMut()),
src_pt: &mut PageTable,
addr: VirtAddr,
attr: &MemoryAttr,
);
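With the `with` callback gone, cloning a mapping is a straight-line copy between two page tables that are both accessible at once. A sketch of an implementation under the new signature, modeled directly on the `ByFrame` handler earlier in this commit:

fn clone_map(
    &self,
    pt: &mut PageTable,     // target (newly created) page table
    src_pt: &mut PageTable, // source page table being cloned from
    addr: VirtAddr,
    attr: &MemoryAttr,
) {
    // Map a fresh frame at `addr` in the target table...
    self.map(pt, addr, attr);
    // ...then copy the page contents straight out of the source table.
    let data = src_pt.get_page_slice_mut(addr);
    pt.get_page_slice_mut(addr).copy_from_slice(data);
}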

@ -1,5 +1,4 @@
//! memory set, area
//! and the inactive page table
//! Memory management structures
use alloc::{boxed::Box, string::String, vec::Vec};
use core::fmt::{Debug, Error, Formatter};
@ -13,8 +12,7 @@ use self::handler::MemoryHandler;
pub mod handler;
/// a continuous memory space when the same attribute
/// like `vma_struct` in ucore
/// A continuous memory space with the same attribute
#[derive(Debug, Clone)]
pub struct MemoryArea {
start_addr: VirtAddr,
@ -25,31 +23,7 @@ pub struct MemoryArea {
}
impl MemoryArea {
/*
** @brief get slice of the content in the memory area
** @retval &[u8] the slice of the content in the memory area
*/
pub unsafe fn as_slice(&self) -> &[u8] {
::core::slice::from_raw_parts(
self.start_addr as *const u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief get mutable slice of the content in the memory area
** @retval &mut[u8] the mutable slice of the content in the memory area
*/
pub unsafe fn as_slice_mut(&self) -> &mut [u8] {
::core::slice::from_raw_parts_mut(
self.start_addr as *mut u8,
self.end_addr - self.start_addr,
)
}
/*
** @brief test whether a virtual address is in the memory area
** @param addr: VirtAddr the virtual address to test
** @retval bool whether the virtual address is in the memory area
*/
/// Test whether a virtual address is in the memory area
pub fn contains(&self, addr: VirtAddr) -> bool {
addr >= self.start_addr && addr < self.end_addr
}
@ -121,42 +95,22 @@ pub struct MemoryAttr {
}
impl MemoryAttr {
/*
** @brief set the memory attribute's user bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn user(mut self) -> Self {
self.user = true;
self
}
/*
** @brief set the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn readonly(mut self) -> Self {
self.readonly = true;
self
}
/*
** @brief unset the memory attribute's readonly bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn writable(mut self) -> Self {
self.readonly = false;
self
}
/*
** @brief set the memory attribute's execute bit
** @retval MemoryAttr the memory attribute itself
*/
pub fn execute(mut self) -> Self {
self.execute = true;
self
}
/*
** @brief set the MMIO type
** @retval MemoryAttr the memory attribute itself
*/
pub fn mmio(mut self, value: u8) -> Self {
self.mmio = value;
self
@ -172,26 +126,23 @@ impl MemoryAttr {
}
}
/// set of memory space with multiple memory area with associated page table and stack space
/// like `mm_struct` in ucore
/// A set of memory areas with an associated page table
/// NOTE: Don't remove align(64), or the kernel will fail to run on MIPS.
#[repr(align(64))]
pub struct MemorySet<T: InactivePageTable> {
pub struct MemorySet<T: PageTableExt> {
areas: Vec<MemoryArea>,
page_table: T,
}
impl<T: InactivePageTable> MemorySet<T> {
/*
** @brief create a memory set
** @retval MemorySet<T> the memory set created
*/
impl<T: PageTableExt> MemorySet<T> {
/// Create a new `MemorySet`
pub fn new() -> Self {
MemorySet {
areas: Vec::new(),
page_table: T::new(),
}
}
/// Create a new `MemorySet` for kernel remap
pub fn new_bare() -> Self {
MemorySet {
areas: Vec::new(),
@ -284,11 +235,7 @@ impl<T: InactivePageTable> MemorySet<T> {
.find(|area| area.is_overlap_with(start_addr, end_addr))
.is_none()
}
/*
** @brief add the memory area to the memory set
** @param area: MemoryArea the memory area to add
** @retval none
*/
/// Add an area to this set
pub fn push(
&mut self,
start_addr: VirtAddr,
@ -309,7 +256,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: Box::new(handler),
name,
};
self.page_table.edit(|pt| area.map(pt));
area.map(&mut self.page_table);
// keep order by start address
let idx = self
.areas
@ -321,28 +268,21 @@ impl<T: InactivePageTable> MemorySet<T> {
self.areas.insert(idx, area);
}
/*
** @brief remove the memory area from the memory set
** @param area: MemoryArea the memory area to remove
** @retval none
*/
/// Remove the area `[start_addr, end_addr)` from `MemorySet`
pub fn pop(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area");
for i in 0..self.areas.len() {
if self.areas[i].start_addr == start_addr && self.areas[i].end_addr == end_addr {
let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt));
area.unmap(&mut self.page_table);
return;
}
}
panic!("no memory area found");
}
/*
** @brief remove the memory area from the memory set and split existed ones when necessary
** @param area: MemoryArea the memory area to remove
** @retval none
*/
/// Remove the area `[start_addr, end_addr)` from `MemorySet`
/// and split existed ones when necessary.
pub fn pop_with_split(&mut self, start_addr: VirtAddr, end_addr: VirtAddr) {
assert!(start_addr <= end_addr, "invalid memory area");
let mut i = 0;
@ -351,7 +291,7 @@ impl<T: InactivePageTable> MemorySet<T> {
if self.areas[i].start_addr >= start_addr && self.areas[i].end_addr <= end_addr {
// subset
let area = self.areas.remove(i);
self.page_table.edit(|pt| area.unmap(pt));
area.unmap(&mut self.page_table);
i -= 1;
} else if self.areas[i].start_addr >= start_addr
&& self.areas[i].start_addr < end_addr
@ -365,7 +305,7 @@ impl<T: InactivePageTable> MemorySet<T> {
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea {
start_addr: end_addr,
end_addr: area.end_addr,
@ -379,13 +319,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// postfix
let area = self.areas.remove(i);
let dead_area = MemoryArea {
start_addr: start_addr,
start_addr,
end_addr: area.end_addr,
attr: area.attr,
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area = MemoryArea {
start_addr: area.start_addr,
end_addr: start_addr,
@ -398,13 +338,13 @@ impl<T: InactivePageTable> MemorySet<T> {
// superset
let area = self.areas.remove(i);
let dead_area = MemoryArea {
start_addr: start_addr,
end_addr: end_addr,
start_addr,
end_addr,
attr: area.attr,
handler: area.handler.box_clone(),
name: area.name,
};
self.page_table.edit(|pt| dead_area.unmap(pt));
dead_area.unmap(&mut self.page_table);
let new_area_left = MemoryArea {
start_addr: area.start_addr,
end_addr: start_addr,
@ -428,74 +368,50 @@ impl<T: InactivePageTable> MemorySet<T> {
}
}
/*
** @brief get iterator of the memory area
** @retval impl Iterator<Item=&MemoryArea>
** the memory area iterator
*/
/// Get an iterator over the areas
pub fn iter(&self) -> impl Iterator<Item = &MemoryArea> {
self.areas.iter()
}
pub fn edit(&mut self, f: impl FnOnce(&mut T::Active)) {
self.page_table.edit(f);
}
/*
** @brief execute function with the associated page table
** @param f: impl FnOnce() the function to be executed
** @retval none
*/
/// Execute function `f` with the associated page table
pub unsafe fn with(&self, f: impl FnOnce()) {
self.page_table.with(f);
}
/*
** @brief activate the associated page table
** @retval none
*/
/// Activate the associated page table
pub unsafe fn activate(&self) {
self.page_table.activate();
}
/*
** @brief get the token of the associated page table
** @retval usize the token of the inactive page table
*/
/// Get the token of the associated page table
pub fn token(&self) -> usize {
self.page_table.token()
}
/*
** @brief clear the memory set
** @retval none
*/
/// Clear and unmap all areas
pub fn clear(&mut self) {
let Self {
ref mut page_table,
ref mut areas,
..
} = self;
page_table.edit(|pt| {
for area in areas.iter() {
area.unmap(pt);
}
});
for area in areas.iter() {
area.unmap(page_table);
}
areas.clear();
}
/// Get the physical address of the page containing virtual address `addr`
pub fn translate(&mut self, addr: VirtAddr) -> Option<PhysAddr> {
self.page_table.edit(|pt| {
pt.get_entry(addr).and_then(|entry| {
if entry.user() {
Some(entry.target())
} else {
None
}
})
self.page_table.get_entry(addr).and_then(|entry| {
if entry.user() {
Some(entry.target())
} else {
None
}
})
}
/*
** @brief get the mutable reference for the inactive page table
** @retval: &mut T the mutable reference of the inactive page table
*/
/// Get a mutable reference to the inner page table
pub fn get_page_table_mut(&mut self) -> &mut T {
&mut self.page_table
}
@ -503,32 +419,28 @@ impl<T: InactivePageTable> MemorySet<T> {
pub fn handle_page_fault(&mut self, addr: VirtAddr) -> bool {
let area = self.areas.iter().find(|area| area.contains(addr));
match area {
Some(area) => self
.page_table
.edit(|pt| area.handler.handle_page_fault(pt, addr)),
Some(area) => area.handler.handle_page_fault(&mut self.page_table, addr),
None => false,
}
}
pub fn clone(&mut self) -> Self {
let new_page_table = T::new();
let mut new_page_table = T::new();
let Self {
ref mut page_table,
ref areas,
..
} = self;
page_table.edit(|pt| {
for area in areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
area.handler.clone_map(
pt,
&|f| unsafe { new_page_table.with(f) },
page.start_address(),
&area.attr,
);
}
for area in areas.iter() {
for page in Page::range_of(area.start_addr, area.end_addr) {
area.handler.clone_map(
&mut new_page_table,
page_table,
page.start_address(),
&area.attr,
);
}
});
}
MemorySet {
areas: areas.clone(),
page_table: new_page_table,
@ -536,13 +448,13 @@ impl<T: InactivePageTable> MemorySet<T> {
}
}
impl<T: InactivePageTable> Drop for MemorySet<T> {
impl<T: PageTableExt> Drop for MemorySet<T> {
fn drop(&mut self) {
self.clear();
}
}
impl<T: InactivePageTable> Debug for MemorySet<T> {
impl<T: PageTableExt> Debug for MemorySet<T> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
f.debug_list().entries(self.areas.iter()).finish()
}

@ -1,23 +0,0 @@
//! Helper functions
use super::*;
pub trait PageTableExt: PageTable {
// Take some special care here.
// TEMP_PAGE_ADDR mapping might be overwritten in the `f` below.
// So this should be really high in kernel space when necessary.
const TEMP_PAGE_ADDR: VirtAddr = 0xcafeb000;
fn with_temporary_map<T, D>(
&mut self,
target: PhysAddr,
f: impl FnOnce(&mut Self, &mut D) -> T,
) -> T {
self.map(Self::TEMP_PAGE_ADDR, target);
let data =
unsafe { &mut *(self.get_page_slice_mut(Self::TEMP_PAGE_ADDR).as_ptr() as *mut D) };
let ret = f(self, data);
self.unmap(Self::TEMP_PAGE_ADDR);
ret
}
}

@ -2,12 +2,10 @@
//!
//! Implemented for every architecture, used by OS.
pub use self::ext::*;
#[cfg(test)]
pub use self::mock_page_table::MockPageTable;
use super::*;
mod ext;
#[cfg(test)]
mod mock_page_table;
@ -26,31 +24,18 @@ pub trait PageTable {
fn get_entry(&mut self, addr: VirtAddr) -> Option<&mut Entry>;
/// Get a mutable reference of the content of a page of virtual address `addr`
/// Used for testing with mock
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
unsafe { core::slice::from_raw_parts_mut((addr & !(PAGE_SIZE - 1)) as *mut u8, PAGE_SIZE) }
}
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8];
/// Read data from virtual address `addr`
/// Used for testing with mock
fn read(&mut self, addr: VirtAddr) -> u8 {
unsafe { (addr as *const u8).read() }
fn read(&mut self, _addr: VirtAddr) -> u8 {
unimplemented!()
}
/// Write data to virtual address `addr`
/// Used for testing with mock
fn write(&mut self, addr: VirtAddr, data: u8) {
unsafe { (addr as *mut u8).write(data) }
}
/// When `vaddr` is not mapped, map it to `paddr`.
fn map_if_not_exists(&mut self, vaddr: VirtAddr, paddr: usize) -> bool {
if let Some(entry) = self.get_entry(vaddr) {
if entry.present() {
return false;
}
}
self.map(vaddr, paddr);
true
fn write(&mut self, _addr: VirtAddr, _data: u8) {
unimplemented!()
}
}
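Note that `get_page_slice_mut` loses its default body here, so every real page table must now implement it, while `read`/`write` keep defaults that panic because only the mock table overrides them. The removed default is still a reasonable sketch for an identity- or linearly-mapped target:

// The old default body, for reference: correct only when `addr` is
// directly dereferenceable in the current address space.
fn get_page_slice_mut<'a>(&mut self, addr: VirtAddr) -> &'a mut [u8] {
    let start = addr & !(PAGE_SIZE - 1); // round down to the page boundary
    unsafe { core::slice::from_raw_parts_mut(start as *mut u8, PAGE_SIZE) }
}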
@ -99,13 +84,8 @@ pub trait Entry {
fn set_mmio(&mut self, value: u8);
}
/// An inactive page table
/// Note: InactivePageTable is not a PageTable
/// but it can be activated and "become" a PageTable
pub trait InactivePageTable: Sized {
/// the active version of page table
type Active: PageTable;
/// Extra methods of `PageTable` for non-trait-object usage
pub trait PageTableExt: PageTable + Sized {
/// Create a new page table with kernel memory mapped
fn new() -> Self {
let mut pt = Self::new_bare();
@ -125,10 +105,6 @@ pub trait InactivePageTable: Sized {
fn active_token() -> usize;
fn flush_tlb();
/// Make this page table editable
/// Set the recursive entry of current active page table to this
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T;
/// Activate this page table
unsafe fn activate(&self) {
let old_token = Self::active_token();

kernel/Cargo.lock generated

@ -78,7 +78,7 @@ source = "git+https://github.com/myrrlyn/bitvec.git#8ab20a3e33fe068fc3a4a05eda12
[[package]]
name = "bootloader"
version = "0.4.0"
source = "git+https://github.com/rcore-os/bootloader#18e4fec0d82e8a5571abceb69d1d11fc0edccba1"
source = "git+https://github.com/rcore-os/bootloader?branch=linear#cc33d7d2d2d33f5adcbd0f596964ba99127b51af"
dependencies = [
"apic 0.1.0 (git+https://github.com/rcore-os/apic-rs)",
"fixedvec 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -101,6 +101,11 @@ name = "byteorder"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cast"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cc"
version = "1.0.31"
@ -354,7 +359,7 @@ dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)",
"bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)",
"bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader)",
"bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)",
"buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)",
"console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -377,7 +382,7 @@ dependencies = [
"spin 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"uart_16550 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"volatile 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -445,7 +450,7 @@ dependencies = [
[[package]]
name = "riscv"
version = "0.5.0"
source = "git+https://github.com/rcore-os/riscv#e8be9f93513225596709a2dccd9064324591fc3c"
source = "git+https://github.com/rcore-os/riscv#8e25d63d123773145911f4a1f718fc1bc73d80c6"
dependencies = [
"bare-metal 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@ -626,6 +631,19 @@ dependencies = [
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "x86_64"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"array-init 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ux 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "xmas-elf"
version = "0.6.2"
@ -650,9 +668,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum bitmap-allocator 0.1.0 (git+https://github.com/rcore-os/bitmap-allocator)" = "<none>"
"checksum bitvec 0.11.0 (git+https://github.com/myrrlyn/bitvec.git)" = "<none>"
"checksum bitvec 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cfadef5c4e2c2e64067b9ecc061179f12ac7ec65ba613b1f60f3972bbada1f5b"
"checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader)" = "<none>"
"checksum bootloader 0.4.0 (git+https://github.com/rcore-os/bootloader?branch=linear)" = "<none>"
"checksum buddy_system_allocator 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "59da15ef556589ee78370281d75b67f2d69ed26465ec0e0f3961e2021502426f"
"checksum byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a019b10a2a7cdeb292db131fc8113e57ea2a908f6e7894b0c3c671893b65dbeb"
"checksum cast 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "926013f2860c46252efceabb19f4a6b308197505082c609025aa6706c011d427"
"checksum cc 1.0.31 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ce8bb087aacff865633f0bd5aeaed910fe2fe55b55f4739527f2e023a2e53d"
"checksum cfg-if 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "11d43355396e872eefb45ce6342e4374ed7bc2b3a502d1b28e36d6e23c05d1f4"
"checksum console-traits 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f711b3d1d5c3f7ae7d6428901c0f3e5d5f5c800fcfac86bf0252e96373a2cec6"
@ -717,5 +736,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum x86 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "841e1ca5a87068718a2a26f2473c6f93cf3b8119f9778fa0ae4b39b664d9e66a"
"checksum x86_64 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f9258d7e2dd25008d69e8c9e9ee37865887a5e1e3d06a62f1cb3f6c209e6f177"
"checksum x86_64 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1d0a8201f52d2c7b373c7243dcdfb27c0dd5012f221ef6a126f507ee82005204"
"checksum x86_64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d69bf2d256c74df90fcc68aaf99862dd205310609e9d56247a5c82ead2f28a93"
"checksum xmas-elf 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "22678df5df766e8d1e5d609da69f0c3132d794edf6ab5e75e7abcd2270d4cf58"
"checksum zero 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f1bc8a6b2005884962297587045002d8cfb8dcec9db332f4ca216ddc5de82c5"

@ -19,12 +19,10 @@ authors = [
]
[features]
default = ["sv39"]
# Page table sv39 or sv48 (for riscv64)
sv39 = []
board_u540 = ["sv39", "link_user"]
board_k210 = ["sv39", "link_user"]
board_rocket_chip = ["sv39", "link_user"]
default = []
board_u540 = ["link_user"]
board_k210 = ["link_user"]
board_rocket_chip = ["link_user"]
# (for aarch64 RaspberryPi3)
nographic = []
board_raspi3 = ["bcm2837", "link_user"]
@ -70,9 +68,9 @@ rcore-fs = { git = "https://github.com/rcore-os/rcore-fs" }
rcore-fs-sfs = { git = "https://github.com/rcore-os/rcore-fs" }
[target.'cfg(target_arch = "x86_64")'.dependencies]
bootloader = { git = "https://github.com/rcore-os/bootloader" }
bootloader = { git = "https://github.com/rcore-os/bootloader", branch = "linear" }
apic = { git = "https://github.com/rcore-os/apic-rs" }
x86_64 = "0.5"
x86_64 = "0.6"
raw-cpuid = "6.0"
uart_16550 = "0.2"
pc-keyboard = "0.5"

@ -2,12 +2,11 @@
///
pub use super::board::consts::*;
pub const MEMORY_OFFSET: usize = 0x80000000;
pub const KERNEL_OFFSET: usize = 0x80100000;
pub const MEMORY_OFFSET: usize = 0x8000_0000;
pub const PHYSICAL_MEMORY_OFFSET: usize = 0x80000000;
pub const USER_STACK_OFFSET: usize = 0x70000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const USER32_STACK_OFFSET: usize = 0x70000000 - USER_STACK_SIZE;
pub const MAX_DTB_SIZE: usize = 0x2000;

@ -232,7 +232,7 @@ fn reserved_inst(tf: &mut TrapFrame) -> bool {
let tls = unsafe { *(_cur_tls as *const usize) };
set_trapframe_register(rt, tls, tf);
info!("Read TLS by rdhdr {:x} to register {:?}", tls, rt);
debug!("Read TLS by rdhdr {:x} to register {:?}", tls, rt);
return true;
} else {
return false;

@ -1,28 +1,31 @@
// Depends on kernel
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use log::*;
use crate::memory::{alloc_frame, dealloc_frame};
use mips::addr::*;
use mips::paging::{FrameAllocator, FrameDeallocator};
use mips::paging::{
Mapper, PageTable as MIPSPageTable, PageTableEntry, PageTableFlags as EF, TwoLevelPageTable,
FrameAllocator, FrameDeallocator, Mapper, PageTable as MIPSPageTable, PageTableEntry,
PageTableFlags as EF, TwoLevelPageTable,
};
use mips::tlb::*;
use mips::tlb::TLBEntry;
use rcore_memory::paging::*;
pub struct ActivePageTable(usize, PageEntry);
pub struct PageTableImpl {
page_table: TwoLevelPageTable<'static>,
root_frame: Frame,
entry: PageEntry,
}
/// PageTableEntry: the contents of this entry.
/// Page: this entry is the PTE of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::WRITABLE | EF::CACHEABLE;
let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target));
// we may need frame allocator to alloc frame for new page table(first/second)
self.get_table()
self.page_table
.map_to(page, frame, flags, &mut FrameAllocatorForMips)
.unwrap()
.flush();
@ -31,20 +34,29 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.get_table().unmap(page).unwrap();
let (_, flush) = self.page_table.unmap(page).unwrap();
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.get_table().ref_entry(page.clone()) {
if let Ok(e) = self.page_table.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
self.entry = PageEntry(e, page);
Some(&mut self.entry as &mut Entry)
} else {
None
}
}
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self
.page_table
.translate_page(Page::of_addr(VirtAddr::new(addr)))
.unwrap();
let vaddr = frame.to_kernel_unmapped().as_usize();
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
extern "C" {
@ -54,7 +66,7 @@ extern "C" {
pub fn set_root_page_table_ptr(ptr: usize) {
unsafe {
clear_all_tlb();
TLBEntry::clear_all();
*(_root_page_table_ptr as *mut usize) = ptr;
}
}
@ -67,35 +79,11 @@ pub fn root_page_table_buffer() -> &'static mut MIPSPageTable {
unsafe { &mut *(_root_page_table_ptr as *mut MIPSPageTable) }
}
impl PageTableExt for ActivePageTable {}
static mut __page_table_with_mode: bool = false;
/// The virtual address of root page table
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(get_root_page_table_ptr(), ::core::mem::uninitialized())
}
unsafe fn get_raw_table(&mut self) -> *mut MIPSPageTable {
if __page_table_with_mode {
get_root_page_table_ptr() as *mut MIPSPageTable
} else {
self.0 as *mut MIPSPageTable
}
}
fn get_table(&mut self) -> TwoLevelPageTable<'static> {
unsafe { TwoLevelPageTable::new(&mut *self.get_raw_table()) }
}
}
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
impl Entry for PageEntry {
fn update(&mut self) {
unsafe {
clear_all_tlb();
TLBEntry::clear_all();
}
}
fn accessed(&self) -> bool {
@ -158,22 +146,33 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
root_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Frame::of_addr(PhysAddr::new(get_root_page_table_ptr() & 0x7fffffff));
let table = root_page_table_buffer();
PageTableImpl {
page_table: TwoLevelPageTable::new(table),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
let table = unsafe { &mut *(target as *mut MIPSPageTable) };
table.zero();
InactivePageTable0 { root_frame: frame }
PageTableImpl {
page_table: TwoLevelPageTable::new(table),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
fn map_kernel(&mut self) {
@ -194,61 +193,12 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() {
unsafe {
clear_all_tlb();
}
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
unsafe {
clear_all_tlb();
}
debug!(
"edit table {:x?} -> {:x?}",
Self::active_token(),
self.token()
);
let mut active = unsafe { ActivePageTable(self.token(), ::core::mem::uninitialized()) };
let ret = f(&mut active);
debug!("finish table");
unsafe {
clear_all_tlb();
}
ret
}
unsafe fn with<T>(&self, f: impl FnOnce() -> T) -> T {
let old_token = Self::active_token();
let new_token = self.token();
let old_mode = unsafe { __page_table_with_mode };
unsafe {
__page_table_with_mode = true;
}
debug!("switch table {:x?} -> {:x?}", old_token, new_token);
if old_token != new_token {
Self::set_token(new_token);
Self::flush_tlb();
TLBEntry::clear_all();
}
let ret = f();
debug!("switch table {:x?} -> {:x?}", new_token, old_token);
if old_token != new_token {
Self::set_token(old_token);
Self::flush_tlb();
}
unsafe {
__page_table_with_mode = old_mode;
}
ret
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize());
}

@ -1,22 +1,20 @@
use super::consts::KERNEL_OFFSET;
use crate::memory::active_table;
use rcore_memory::paging::PageTable;
use crate::memory::phys_to_virt;
/// Mask all external interrupts except serial.
pub unsafe fn init_external_interrupt() {
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u64 = (KERNEL_OFFSET + 0x0C00_2080) as *mut u64;
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u64 = phys_to_virt(0x0C00_2080) as *mut u64;
// enable all external interrupts
HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(0xf);
// mask interrupts first
const AXI_INTC_IER: *mut u32 = (KERNEL_OFFSET + 0x1810_0008) as *mut u32;
const AXI_INTC_IER: *mut u32 = phys_to_virt(0x6120_0008) as *mut u32;
AXI_INTC_IER.write_volatile(0x0);
// acknowledge all interrupts
const AXI_INTC_IAR: *mut u32 = (KERNEL_OFFSET + 0x1810_000C) as *mut u32;
const AXI_INTC_IAR: *mut u32 = phys_to_virt(0x6120_000C) as *mut u32;
AXI_INTC_IAR.write_volatile(0xffffffff);
const AXI_INTC_MER: *mut u32 = (KERNEL_OFFSET + 0x1810_001C) as *mut u32;
const AXI_INTC_MER: *mut u32 = phys_to_virt(0x6120_001C) as *mut u32;
// Hardware Interrupt enable | Enable irq output
AXI_INTC_MER.write_volatile(0b11);
@ -27,20 +25,19 @@ pub unsafe fn init_external_interrupt() {
/// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() {
const HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 =
(KERNEL_OFFSET + 0x0C20_1004) as *mut u32;
const HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = phys_to_virt(0x0C20_1004) as *mut u32;
// claim
let source = HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile();
// complete
HART0_S_MODE_INTERRUPT_CLAIM_COMPLETE.write_volatile(source);
// acknowledge all interrupts
const AXI_INTC_IAR: *mut u32 = (KERNEL_OFFSET + 0x1810_000C) as *mut u32;
const AXI_INTC_IAR: *mut u32 = phys_to_virt(0x6120_000C) as *mut u32;
AXI_INTC_IAR.write_volatile(0xffffffff);
}
pub unsafe fn enable_serial_interrupt() {
const SERIAL_BASE: *mut u32 = (KERNEL_OFFSET + 0x18000000) as *mut u32;
const SERIAL_BASE: *mut u32 = phys_to_virt(0x60000000) as *mut u32;
const UART_CTRL_REG: usize = 3;
// Intr enable | rx reset | tx reset
const UART_IE: u32 = 0x13;

@ -1,8 +1,9 @@
use super::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
/// Mask all external interrupts except serial.
pub unsafe fn init_external_interrupt() {
const HART1_S_MODE_INTERRUPT_ENABLES: *mut u64 = (KERNEL_OFFSET + 0x0C00_2100) as *mut u64;
const HART1_S_MODE_INTERRUPT_ENABLES: *mut u64 = phys_to_virt(0x0C00_2100) as *mut u64;
const SERIAL: u64 = 4;
HART1_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL);
}
@ -10,8 +11,7 @@ pub unsafe fn init_external_interrupt() {
/// Claim and complete external interrupt by reading and writing to
/// PLIC Interrupt Claim/Complete Register.
pub unsafe fn handle_external_interrupt() {
const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 =
(KERNEL_OFFSET + 0x0C20_2004) as *mut u32;
const HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE: *mut u32 = phys_to_virt(0x0C20_2004) as *mut u32;
// claim
let source = HART1_S_MODE_INTERRUPT_CLAIM_COMPLETE.read_volatile();
// complete
@ -19,7 +19,7 @@ pub unsafe fn handle_external_interrupt() {
}
pub unsafe fn enable_serial_interrupt() {
const SERIAL_BASE: *mut u8 = (KERNEL_OFFSET + 0x10010000) as *mut u8;
const SERIAL_BASE: *mut u8 = phys_to_virt(0x10010000) as *mut u8;
const UART_REG_IE: usize = 4;
const UART_RXWM: u8 = 0x2;
SERIAL_BASE.add(UART_REG_IE).write_volatile(UART_RXWM);

@ -1,4 +1,4 @@
use super::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
/// Mask all external interrupts except serial.
pub unsafe fn init_external_interrupt() {
@ -6,13 +6,13 @@ pub unsafe fn init_external_interrupt() {
// riscv-pk (bbl) enables all S-Mode IRQs (ref: machine/minit.c)
// OpenSBI v0.3 disables all IRQs (ref: platform/common/irqchip/plic.c)
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u32 = (KERNEL_OFFSET + 0x0C00_2080) as *mut u32;
const HART0_S_MODE_INTERRUPT_ENABLES: *mut u32 = phys_to_virt(0x0C00_2080) as *mut u32;
const SERIAL: u32 = 0xa;
HART0_S_MODE_INTERRUPT_ENABLES.write_volatile(1 << SERIAL);
}
pub unsafe fn enable_serial_interrupt() {
const UART16550: *mut u8 = (KERNEL_OFFSET + 0x10000000) as *mut u8;
const UART16550: *mut u8 = phys_to_virt(0x10000000) as *mut u8;
UART16550.add(4).write_volatile(0x0B);
UART16550.add(1).write_volatile(0x01);
}
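Every MMIO constant in these interrupt files changes the same way: `KERNEL_OFFSET + paddr` becomes `phys_to_virt(paddr)`. Judging from the `PHYSICAL_MEMORY_OFFSET` constants added in this commit and the `const` contexts the call sites sit in, `phys_to_virt` is presumably a linear-mapping helper along these lines (a sketch, not the kernel's literal source):

// Assumed shape: a const fn so it can appear in
// `const PTR: *mut u32 = phys_to_virt(0x0C00_2080) as *mut u32;` above.
pub const fn phys_to_virt(paddr: usize) -> usize {
    PHYSICAL_MEMORY_OFFSET + paddr
}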

@ -12,11 +12,27 @@ _start:
lui sp, %hi(bootstack)
add sp, sp, t0
# 2. enable paging
# 2. paging
# satp = (1 << 31) | PPN(boot_page_table_sv32)
lui t0, %hi(boot_page_table_sv32)
li t1, 0xc0000000 - 0x80000000
sub t0, t0, t1
# 2.1 linear mapping (0xc0000000 -> 0x80000000)
li t2, 768*4
li t4, 0x400 << 10
li t5, 4
add t1, t0, t2
li t6, 1024*4
add t6, t0, t6
li t3, (0x80000 << 10) | 0xcf # VRWXAD
loop:
sw t3, 0(t1)
add t3, t3, t4
add t1, t1, t5
bne t1, t6, loop
# 2.2 enable paging
srli t0, t0, 12
li t1, 1 << 31
or t0, t0, t1
@ -41,15 +57,11 @@ bootstacktop:
boot_page_table_sv32:
# NOTE: assume kernel image < 16M
# 0x80000000 -> 0x80000000 (4M * 4)
# 0xc0000000 -> 0x80000000 (4M * 4)
# 0xc0000000 -> 0x80000000 (mapped in code above)
.zero 4 * 512
.word (0x80000 << 10) | 0xcf # VRWXAD
.word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252
.word (0x80000 << 10) | 0xcf # VRWXAD
.word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252
.zero 4 * 256
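The new boot-time loop fills root-table entries 768..1024 so that virtual 0xC0000000 + i*4MiB maps to physical 0x80000000 + i*4MiB using Sv32 megapages. Each PTE is (PPN << 10) | flags with PPN = paddr >> 12, which is why the seed value is (0x80000 << 10) | 0xcf and the per-entry increment is 0x400 << 10. The same computation as a Rust sketch:

// Rust equivalent of the assembly loop above (sketch, not kernel code).
fn fill_linear_map(root: &mut [u32; 1024]) {
    for i in 0..256 {
        let paddr = 0x8000_0000u32 + (i as u32) * 0x40_0000; // 4 MiB steps
        root[768 + i] = ((paddr >> 12) << 10) | 0xcf; // VRWXAD megapage PTE
    }
}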

@ -1,27 +1,14 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
// Linear mapping
#[cfg(target_arch = "riscv32")]
pub const RECURSIVE_INDEX: usize = 0x3fd;
pub const PHYSICAL_MEMORY_OFFSET: usize = 0x4000_0000;
#[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0o774;
// Under riscv64, upon booting, paging is enabled by bbl and
// root_table[0777] maps to p3_table,
// and p3_table[0777] maps to gigapage 8000_0000H,
// so 0xFFFF_FFFF_8000_0000 maps to 0x8000_0000
// root_table[0774] points to root_table itself as page table
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
pub const PHYSICAL_MEMORY_OFFSET: usize = 0xFFFF_FFFF_4000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_OFFSET: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_OFFSET: usize = 0xFFFF_FFFF_C000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_P2_INDEX: usize = (KERNEL_OFFSET >> 12 >> 10) & 0x3ff;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_P4_INDEX: usize = (KERNEL_OFFSET >> 12 >> 9 >> 9 >> 9) & 0o777;
#[cfg(feature = "board_k210")]
pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
#[cfg(not(feature = "board_k210"))]
@ -35,7 +22,7 @@ pub const MEMORY_END: usize = 0x8060_0000;
pub const MEMORY_END: usize = 0x8800_0000;
// FIXME: rv64 `sh` and `ls` will crash if stack top > 0x80000000 ???
pub const USER_STACK_OFFSET: usize = 0x80000000 - USER_STACK_SIZE;
pub const USER_STACK_OFFSET: usize = 0x40000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 0x10000;
pub const MAX_DTB_SIZE: usize = 0x2000;

@ -17,10 +17,6 @@ pub fn init(dtb: usize) {
// initialize heap and Frame allocator
init_frame_allocator();
init_heap();
// remap the kernel use 4K page
unsafe {
super::paging::setup_recursive_mapping();
}
remap_the_kernel(dtb);
}
@ -54,121 +50,8 @@ fn init_frame_allocator() {
}
/// Remap the kernel memory address with 4K page recorded in p1 page table
fn remap_the_kernel(dtb: usize) {
let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
let mut ms = MemorySet::new_bare();
ms.push(
stext as usize,
etext as usize,
MemoryAttr::default().execute().readonly(),
Linear::new(offset),
"text",
);
ms.push(
sdata as usize,
edata as usize,
MemoryAttr::default(),
Linear::new(offset),
"data",
);
ms.push(
srodata as usize,
erodata as usize,
MemoryAttr::default().readonly(),
Linear::new(offset),
"rodata",
);
ms.push(
bootstack as usize,
bootstacktop as usize,
MemoryAttr::default(),
Linear::new(offset),
"stack",
);
ms.push(
sbss as usize,
ebss as usize,
MemoryAttr::default(),
Linear::new(offset),
"bss",
);
// dtb on rocket chip is embedded into kernel
#[cfg(not(feature = "board_rocket_chip"))]
ms.push(
dtb,
dtb + super::consts::MAX_DTB_SIZE,
MemoryAttr::default().readonly(),
Linear::new(offset),
"dts",
);
// map PLIC for HiFiveU & VirtIO
let offset = -(KERNEL_OFFSET as isize);
ms.push(
KERNEL_OFFSET + 0x0C00_2000,
KERNEL_OFFSET + 0x0C00_2000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic0",
);
ms.push(
KERNEL_OFFSET + 0x0C20_2000,
KERNEL_OFFSET + 0x0C20_2000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic1",
);
// map UART for HiFiveU
ms.push(
KERNEL_OFFSET + 0x10010000,
KERNEL_OFFSET + 0x10010000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"uart",
);
// map UART for VirtIO
ms.push(
KERNEL_OFFSET + 0x10000000,
KERNEL_OFFSET + 0x10000000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"uart16550",
);
// map PLIC for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x0C20_1000,
KERNEL_OFFSET + 0x0C20_1000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(offset),
"plic2",
);
// map UART for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x18000000,
KERNEL_OFFSET + 0x18000000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(-(KERNEL_OFFSET as isize + 0x18000000 - 0x60000000)),
"uartlite",
);
// map AXI INTC for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x18100000,
KERNEL_OFFSET + 0x18100000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(-(KERNEL_OFFSET as isize + 0x18100000 - 0x61200000)),
"axi_intc",
);
// map AXI4-Stream Data FIFO for Rocket Chip
#[cfg(feature = "board_rocket_chip")]
ms.push(
KERNEL_OFFSET + 0x18200000,
KERNEL_OFFSET + 0x18200000 + PAGE_SIZE,
MemoryAttr::default(),
Linear::new(-(KERNEL_OFFSET as isize + 0x18200000 - 0x64A00000)),
"router",
);
fn remap_the_kernel(_dtb: usize) {
let mut ms = MemorySet::new();
unsafe {
ms.activate();
}

@ -20,13 +20,13 @@ mod sbi;
pub mod syscall;
pub mod timer;
use self::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::phys_to_virt;
use core::sync::atomic::{AtomicBool, Ordering};
use log::*;
#[no_mangle]
pub extern "C" fn rust_main(hartid: usize, device_tree_paddr: usize) -> ! {
let mut device_tree_vaddr = device_tree_paddr - MEMORY_OFFSET + KERNEL_OFFSET;
let mut device_tree_vaddr = phys_to_virt(device_tree_paddr);
unsafe {
cpu::set_cpu_id(hartid);

@ -1,10 +1,5 @@
use crate::consts::RECURSIVE_INDEX;
// Depends on kernel
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use crate::consts::PHYSICAL_MEMORY_OFFSET;
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use log::*;
use rcore_memory::paging::*;
use riscv::addr::*;
@ -16,22 +11,29 @@ use riscv::paging::{
};
use riscv::register::satp;
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
#[cfg(target_arch = "riscv32")]
type TopLevelPageTable<'a> = riscv::paging::Rv32PageTable<'a>;
#[cfg(target_arch = "riscv64")]
type TopLevelPageTable<'a> = riscv::paging::Rv39PageTable<'a>;
pub struct PageTableImpl {
page_table: TopLevelPageTable<'static>,
root_frame: Frame,
entry: PageEntry,
}
/// PageTableEntry: the contents of this entry.
/// Page: this entry is the PTE of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging::Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second)
self.0
self.page_table
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap()
.flush();
@ -40,59 +42,28 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.0.unmap(page).unwrap();
let (_, flush) = self.page_table.unmap(page).unwrap();
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.0.ref_entry(page.clone()) {
if let Ok(e) = self.page_table.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
self.entry = PageEntry(e, page);
Some(&mut self.entry as &mut Entry)
} else {
None
}
}
}
impl PageTableExt for ActivePageTable {}
/// The virtual address of root page table
#[cfg(target_arch = "riscv32")]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", feature = "sv39"))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (0o777 << 12 << 9 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", not(feature = "sv39")))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (RECURSIVE_INDEX << 12 << 9 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
impl ActivePageTable {
#[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self {
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
::core::mem::uninitialized(),
)
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn new() -> Self {
#[cfg(feature = "sv39")]
let type_ = PageTableType::Sv39;
#[cfg(not(feature = "sv39"))]
let type_ = PageTableType::Sv48;
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(),
::core::mem::uninitialized(),
)
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self
.page_table
.translate_page(Page::of_addr(VirtAddr::new(addr)))
.unwrap();
let vaddr = frame.start_address().as_usize() + PHYSICAL_MEMORY_OFFSET;
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
@ -173,72 +144,67 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
root_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
#[cfg(target_arch = "riscv32")]
let mask = 0x7fffffff;
#[cfg(target_arch = "riscv64")]
let mask = 0x0fffffff_ffffffff;
let frame = Frame::of_ppn(PageTableImpl::active_token() & mask);
let table = frame.as_kernel_mut(PHYSICAL_MEMORY_OFFSET);
PageTableImpl {
page_table: TopLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { root_frame: frame }
}
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
extern "C" {
fn start();
fn end();
}
let mut entrys: [PageTableEntry; 256] = unsafe { core::mem::uninitialized() };
let entry_start = start as usize >> 22;
let entry_end = (end as usize >> 22) + 1;
let entry_count = entry_end - entry_start;
for i in 0..entry_count {
entrys[i] = table[entry_start + i];
}
let table = unsafe { &mut *(phys_to_virt(target) as *mut RvPageTable) };
table.zero();
self.edit(|_| {
// NOTE: 'table' now refers to new page table
for i in 0..entry_count {
table[entry_start + i] = entrys[i];
}
});
PageTableImpl {
page_table: TopLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e1 = table[KERNEL_P4_INDEX];
assert!(!e1.is_unused());
self.edit(|_| {
table[KERNEL_P4_INDEX] = e1;
});
info!("mapping kernel linear mapping");
let table = unsafe {
&mut *(phys_to_virt(self.root_frame.start_address().as_usize()) as *mut RvPageTable)
};
#[cfg(target_arch = "riscv32")]
for i in 256..1024 {
let flags =
EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE | EF::ACCESSED | EF::DIRTY;
let frame = Frame::of_addr(PhysAddr::new((i << 22) - PHYSICAL_MEMORY_OFFSET));
table[i].set(frame, flags);
}
#[cfg(target_arch = "riscv64")]
for i in 509..512 {
let flags =
EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE | EF::ACCESSED | EF::DIRTY;
let frame = Frame::of_addr(PhysAddr::new(
(0xFFFFFF80_00000000 + (i << 30)) - PHYSICAL_MEMORY_OFFSET,
));
table[i].set(frame, flags);
}
}
#[cfg(target_arch = "riscv32")]
fn token(&self) -> usize {
self.root_frame.number() | (1 << 31) // as satp
}
#[cfg(target_arch = "riscv64")]
fn token(&self) -> usize {
use bit_field::BitField;
let mut satp = self.root_frame.number();
satp.set_bits(44..60, 0); // AS is 0
#[cfg(feature = "sv39")]
satp.set_bits(60..64, satp::Mode::Sv39 as usize);
#[cfg(not(feature = "sv39"))]
satp.set_bits(60..64, satp::Mode::Sv48 as usize);
satp
#[cfg(target_arch = "riscv32")]
return self.root_frame.number() | (1 << 31);
#[cfg(target_arch = "riscv64")]
return self.root_frame.number() | (8 << 60);
}
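The rewritten token() encodes satp by hand: PPN in the low bits, ASID left zero, and the MODE field on top (bit 31 selects Sv32 on rv32; 8 << 60 selects Sv39 on rv64). A worked value, assuming a root table at physical 0x8000_0000, i.e. frame number 0x80000:

// rv64 satp layout: MODE[63:60] | ASID[59:44] | PPN[43:0]
let token: usize = 0x80000 | (8 << 60); // = 0x8000_0000_0008_0000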
unsafe fn set_token(token: usize) {
@ -246,7 +212,11 @@ impl InactivePageTable for InactivePageTable0 {
}
fn active_token() -> usize {
satp::read().bits()
let mut token: usize = 0;
unsafe {
asm!("csrr $0, satp" : "=r"(token) ::: "volatile");
}
token
}
fn flush_tlb() {
@ -254,33 +224,9 @@ impl InactivePageTable for InactivePageTable0 {
sfence_vma_all();
}
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
unsafe {
sfence_vma_all();
}
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
unsafe {
sfence_vma_all();
}
ret
})
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize());
}
@ -299,13 +245,3 @@ impl FrameDeallocator for FrameAllocatorForRiscv {
dealloc_frame(frame.start_address().as_usize());
}
}
pub unsafe fn setup_recursive_mapping() {
let frame = satp::read().frame();
let root_page_table = unsafe { &mut *(frame.start_address().as_usize() as *mut RvPageTable) };
root_page_table.set_recursive(RECURSIVE_INDEX, frame);
unsafe {
sfence_vma_all();
}
info!("setup recursive mapping end");
}

@ -1,6 +1,7 @@
pub const MEMORY_OFFSET: usize = 0;
pub const KERNEL_OFFSET: usize = 0xffffff00_00000000;
pub const KERNEL_HEAP_SIZE: usize = 8 * 1024 * 1024; // 8 MB
pub const PHYSICAL_MEMORY_OFFSET: usize = 0xfffffc00_00000000;
pub const USER_STACK_OFFSET: usize = 0x00008000_00000000 - USER_STACK_SIZE;
pub const USER_STACK_SIZE: usize = 8 * 1024 * 1024; // 8 MB, the default config of Linux

@ -1,3 +1,4 @@
use crate::memory::phys_to_virt;
use apic::{LocalApic, XApic};
use raw_cpuid::CpuId;
use x86_64::registers::control::{Cr0, Cr0Flags};
@ -21,12 +22,12 @@ pub fn id() -> usize {
}
pub fn send_ipi(cpu_id: usize) {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.send_ipi(cpu_id as u8, 0x30); // TODO: Find a IPI trap num
}
pub fn init() {
let mut lapic = unsafe { XApic::new(0xffffff00_fee00000) };
let mut lapic = unsafe { XApic::new(phys_to_virt(0xfee00000)) };
lapic.cpu_init();
// enable FPU, the manual Volume 3 Chapter 13

@ -7,6 +7,7 @@ pub fn init() {
use crate::arch::interrupt::consts;
use crate::arch::interrupt::enable_irq;
enable_irq(consts::Keyboard);
info!("keyboard: init end");
}
/// Receive character from keyboard

@ -12,6 +12,7 @@ pub fn init() {
COM2.lock().init();
enable_irq(consts::COM1);
enable_irq(consts::COM2);
info!("serial: init end");
}
pub trait SerialRead {

@ -6,7 +6,7 @@ use spin::Mutex;
use volatile::Volatile;
use x86_64::instructions::port::Port;
use crate::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
use crate::util::color::ConsoleColor;
use crate::util::escape_parser::{EscapeParser, CSI};
@ -99,10 +99,9 @@ impl VgaBuffer {
}
lazy_static! {
pub static ref VGA_WRITER: Mutex<VgaWriter> = Mutex::new(
// VGA virtual address is specified at bootloader
VgaWriter::new(unsafe{ &mut *((KERNEL_OFFSET + 0xf0000000) as *mut VgaBuffer) })
);
pub static ref VGA_WRITER: Mutex<VgaWriter> = Mutex::new(VgaWriter::new(unsafe {
&mut *((phys_to_virt(0xb8000)) as *mut VgaBuffer)
}));
}
pub struct VgaWriter {

@ -48,10 +48,7 @@ impl Cpu {
}
pub fn iter() -> impl Iterator<Item = &'static Self> {
unsafe {
CPUS.iter()
.filter_map(|x| x.as_ref())
}
unsafe { CPUS.iter().filter_map(|x| x.as_ref()) }
}
pub fn id(&self) -> usize {
self.id
@ -114,7 +111,7 @@ const KCODE: Descriptor = Descriptor::UserSegment(0x0020980000000000); // EXECUT
const UCODE: Descriptor = Descriptor::UserSegment(0x0020F80000000000); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT | LONG_MODE
const KDATA: Descriptor = Descriptor::UserSegment(0x0000920000000000); // DATA_WRITABLE | USER_SEGMENT | PRESENT
const UDATA: Descriptor = Descriptor::UserSegment(0x0000F20000000000); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT
// Copied from xv6
// Copied from xv6
const UCODE32: Descriptor = Descriptor::UserSegment(0x00cffa00_0000ffff); // EXECUTABLE | USER_SEGMENT | USER_MODE | PRESENT
const UDATA32: Descriptor = Descriptor::UserSegment(0x00cff200_0000ffff); // DATA_WRITABLE | USER_SEGMENT | USER_MODE | PRESENT

@ -5,7 +5,7 @@ mod trapframe;
pub use self::handler::*;
pub use self::trapframe::*;
use crate::consts::KERNEL_OFFSET;
use crate::memory::phys_to_virt;
use apic::*;
#[inline(always)]
@ -39,12 +39,12 @@ pub fn no_interrupt(f: impl FnOnce()) {
#[inline(always)]
pub fn enable_irq(irq: u8) {
let mut ioapic = unsafe { IoApic::new(KERNEL_OFFSET + IOAPIC_ADDR as usize) };
let mut ioapic = unsafe { IoApic::new(phys_to_virt(IOAPIC_ADDR as usize)) };
ioapic.enable(irq, 0);
}
#[inline(always)]
pub fn ack(_irq: u8) {
let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) };
let mut lapic = unsafe { XApic::new(phys_to_virt(LAPIC_ADDR)) };
lapic.eoi();
}

@ -1,7 +1,6 @@
use crate::memory::phys_to_virt;
/// Interface for inter-processor interrupt.
/// This module wraps inter-processor interrupt into a broadcast-calling style.
use crate::consts::KERNEL_OFFSET;
use alloc::boxed::{Box, FnBox};
use alloc::sync::Arc;
use apic::{LocalApic, XApic, LAPIC_ADDR};
@ -10,7 +9,7 @@ use core::sync::atomic::{spin_loop_hint, AtomicU8, Ordering};
pub type IPIEventItem = Box<FnBox()>;
unsafe fn get_apic() -> XApic {
let mut lapic = unsafe { XApic::new(KERNEL_OFFSET + LAPIC_ADDR) };
let mut lapic = unsafe { XApic::new(phys_to_virt(LAPIC_ADDR)) };
lapic
}

@ -1,15 +1,10 @@
use crate::consts::KERNEL_OFFSET;
use bitmap_allocator::BitAlloc;
// Depends on kernel
use super::{BootInfo, MemoryRegionType};
use crate::memory::{active_table, init_heap, FRAME_ALLOCATOR};
use log::*;
use crate::memory::{init_heap, FRAME_ALLOCATOR};
use bitmap_allocator::BitAlloc;
use rcore_memory::paging::*;
use rcore_memory::PAGE_SIZE;
pub fn init(boot_info: &BootInfo) {
init_frame_allocator(boot_info);
init_device_vm_map();
init_heap();
info!("memory: init end");
}
@ -25,15 +20,3 @@ fn init_frame_allocator(boot_info: &BootInfo) {
}
}
}
fn init_device_vm_map() {
let mut page_table = active_table();
// IOAPIC
page_table
.map(KERNEL_OFFSET + 0xfec00000, 0xfec00000)
.update();
// LocalAPIC
page_table
.map(KERNEL_OFFSET + 0xfee00000, 0xfee00000)
.update();
}

@ -9,12 +9,12 @@ pub mod gdt;
pub mod idt;
pub mod interrupt;
pub mod io;
pub mod ipi;
pub mod memory;
pub mod paging;
pub mod rand;
pub mod syscall;
pub mod timer;
pub mod ipi;
static AP_CAN_INIT: AtomicBool = AtomicBool::new(false);
@ -25,16 +25,23 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
println!("Hello world! from CPU {}!", cpu_id);
if cpu_id != 0 {
while !AP_CAN_INIT.load(Ordering::Relaxed) {}
while !AP_CAN_INIT.load(Ordering::Relaxed) {
spin_loop_hint();
}
other_start();
}
// First init log mod, so that we can print log info.
crate::logging::init();
info!("{:#?}", boot_info);
info!("{:#x?}", boot_info);
assert_eq!(
boot_info.physical_memory_offset as usize,
consts::PHYSICAL_MEMORY_OFFSET
);
// Init trap handling.
idt::init();
// setup fast syscall in x86_64
interrupt::fast_syscall::init();
// Init physical memory management and heap.
@ -60,14 +67,14 @@ pub extern "C" fn _start(boot_info: &'static BootInfo) -> ! {
/// The entry point for other processors
fn other_start() -> ! {
// Init trap handling.
// init trap handling.
idt::init();
// init gdt
gdt::init();
// init local apic
cpu::init();
// setup fast syscall in xv6-64
// setup fast syscall in x86_64
interrupt::fast_syscall::init();
//call the first main function in kernel.
// call the first main function in kernel.
crate::kmain();
}

@ -1,6 +1,4 @@
// Depends on kernel
use crate::consts::KERNEL_OFFSET;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use core::sync::atomic::Ordering;
use log::*;
use rcore_memory::paging::*;
@ -8,12 +6,12 @@ use x86_64::instructions::tlb;
use x86_64::registers::control::{Cr3, Cr3Flags};
use x86_64::structures::paging::{
frame::PhysFrame as Frame,
mapper::{Mapper, RecursivePageTable},
mapper::{MappedPageTable, Mapper},
page::{Page, PageRange, Size4KiB},
page_table::{PageTable as x86PageTable, PageTableEntry, PageTableFlags as EF},
FrameAllocator, FrameDeallocator,
};
use x86_64::{VirtAddr, PhysAddr};
use x86_64::{PhysAddr, VirtAddr};
pub trait PageExt {
fn of_addr(address: usize) -> Self;
@ -40,11 +38,15 @@ impl FrameExt for Frame {
}
}
pub struct ActivePageTable(RecursivePageTable<'static>);
pub struct PageTableImpl(
MappedPageTable<'static, fn(Frame) -> *mut x86PageTable>,
PageEntry,
Frame,
);
pub struct PageEntry(PageTableEntry);
pub struct PageEntry(&'static mut PageTableEntry, Page, Frame);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
let flags = EF::PRESENT | EF::WRITABLE | EF::NO_EXECUTE;
unsafe {
@ -59,7 +61,7 @@ impl PageTable for ActivePageTable {
.flush();
}
flush_tlb_all(addr);
unsafe { &mut *(get_entry_ptr(addr, 1)) }
self.get_entry(addr).unwrap()
}
fn unmap(&mut self, addr: usize) {
@ -68,33 +70,39 @@ impl PageTable for ActivePageTable {
}
fn get_entry(&mut self, addr: usize) -> Option<&mut Entry> {
for level in 0..3 {
let entry = get_entry_ptr(addr, 4 - level);
if unsafe { !(*entry).present() } {
let mut page_table = frame_to_page_table(self.2);
for level in 0..4 {
let index = (addr >> (12 + (3 - level) * 9)) & 0o777;
let entry = unsafe { &mut (&mut *page_table)[index] };
if level == 3 {
let page = Page::of_addr(addr);
self.1 = PageEntry(entry, page, self.2);
return Some(&mut self.1 as &mut Entry);
}
if !entry.flags().contains(EF::PRESENT) {
return None;
}
page_table = frame_to_page_table(entry.frame().unwrap());
}
unsafe { Some(&mut *(get_entry_ptr(addr, 1))) }
unreachable!();
}
}
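The index arithmetic in the loop above is worth spelling out. A hypothetical standalone helper (pt_index is illustrative, not in the tree):

/// Extracts the 9-bit page-table index for `level` (0 = P4 ... 3 = P1).
fn pt_index(addr: usize, level: usize) -> usize {
    (addr >> (12 + (3 - level) * 9)) & 0o777
}

fn main() {
    // The kernel lives at 0xffff_ff00_0000_0000, i.e. P4 slot 510,
    // matching the `table[510]` entry cloned in map_kernel below.
    assert_eq!(pt_index(0xffff_ff00_0000_0000, 0), 510);
}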
impl PageTableExt for ActivePageTable {
// FIXME: the default value 0xcafeb000 is so low that allocations may overwrite it.
// However, moving it to KERNEL_OFFSET | 0xcafeb000 has unintended effects.
// Someone needs to reconsider this and find a proper solution.
// const TEMP_PAGE_ADDR: usize = KERNEL_OFFSET | 0xcafeb000;
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self.0.translate_page(Page::of_addr(addr)).unwrap();
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
impl ActivePageTable {
pub unsafe fn new() -> Self {
ActivePageTable(RecursivePageTable::new(&mut *(0xffffffff_fffff000 as *mut _)).unwrap())
}
fn frame_to_page_table(frame: Frame) -> *mut x86PageTable {
let vaddr = phys_to_virt(frame.start_address().as_u64() as usize);
vaddr as *mut x86PageTable
}
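frame_to_page_table is the crux of the switch from RecursivePageTable to MappedPageTable: with all of physical memory linearly mapped at PHYSICAL_MEMORY_OFFSET, any page-table frame can be dereferenced directly through phys_to_virt, so the old recursive slot at 0xffffffff_fffff000 and the with_temporary_map machinery are no longer needed.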
impl Entry for PageEntry {
fn update(&mut self) {
use x86_64::{instructions::tlb::flush, VirtAddr};
let addr = VirtAddr::new_unchecked((self as *const _ as u64) << 9);
let addr = self.1.start_address();
flush(addr);
flush_tlb_all(addr.as_u64() as usize);
}
@ -153,14 +161,18 @@ impl Entry for PageEntry {
self.0.flags().contains(EF::USER_ACCESSIBLE)
}
fn set_user(&mut self, value: bool) {
self.as_flags().set(EF::USER_ACCESSIBLE, value);
// x86_64 page table struct do not implement setting USER bit
if value {
let mut addr = self as *const _ as usize;
for _ in 0..3 {
// Upper level entry
addr = ((addr >> 9) & 0o777_777_777_7770) | 0xffffff80_00000000;
// set USER_ACCESSIBLE
unsafe { (*(addr as *mut EF)).insert(EF::USER_ACCESSIBLE) };
let mut page_table = frame_to_page_table(self.2);
for level in 0..4 {
let index =
(self.1.start_address().as_u64() as usize >> (12 + (3 - level) * 9)) & 0o777;
let entry = unsafe { &mut (&mut *page_table)[index] };
entry.set_flags(entry.flags() | EF::USER_ACCESSIBLE);
if level == 3 {
return;
}
page_table = frame_to_page_table(entry.frame().unwrap());
}
}
}
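Setting USER_ACCESSIBLE along the whole walk is deliberate: on x86-64 the effective permission is the logical AND of the U/S bits at every level of the translation, so marking only the leaf entry would still fault in user mode.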
@ -176,51 +188,57 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {}
}
fn get_entry_ptr(addr: usize, level: u8) -> *mut PageEntry {
debug_assert!(level <= 4);
let entry_addr = ((addr >> (level * 9)) & !0x7) | !((1 << (48 - level * 9)) - 1);
entry_addr as *mut PageEntry
}
impl PageEntry {
fn as_flags(&mut self) -> &mut EF {
unsafe { &mut *(self as *mut _ as *mut EF) }
unsafe { &mut *(self.0 as *mut _ as *mut EF) }
}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
p4_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` on it after use!
pub unsafe fn active() -> Self {
let frame = Cr3::read().0;
let table = unsafe { &mut *frame_to_page_table(frame) };
PageTableImpl(
MappedPageTable::new(table, frame_to_page_table),
core::mem::uninitialized(),
frame,
)
}
}
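A hedged usage sketch of the contract above (the address is a placeholder):

// `active()` aliases the live CR3 page table; letting it drop would run
// the `Drop` impl below and dealloc the in-use P4 frame, hence the forget.
let some_vaddr = 0xffff_ff00_0000_0000usize; // placeholder address
let mut pt = unsafe { PageTableImpl::active() };
let paddr = pt.get_entry(some_vaddr).map(|e| e.target());
core::mem::forget(pt); // required: see the WARN above

The same pattern appears in Provider::alloc_dma and enlarge_heap further down.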
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(target);
active_table().with_temporary_map(target, |_, table: &mut x86PageTable| {
table.zero();
// set up recursive mapping for the table
table[511].set_frame(frame.clone(), EF::PRESENT | EF::WRITABLE);
});
InactivePageTable0 { p4_frame: frame }
let table = unsafe { &mut *frame_to_page_table(frame) };
table.zero();
unsafe {
PageTableImpl(
MappedPageTable::new(table, frame_to_page_table),
core::mem::uninitialized(),
frame,
)
}
}
fn map_kernel(&mut self) {
let table = unsafe { &mut *(0xffffffff_fffff000 as *mut x86PageTable) };
let table = unsafe { &mut *frame_to_page_table(Cr3::read().0) };
// Kernel at 0xffff_ff00_0000_0000
// Kernel stack at 0x0000_57ac_0000_0000 (defined in bootloader crate)
let e510 = table[510].clone();
let ekernel = table[510].clone();
let ephysical = table[0x1f8].clone();
let estack = table[175].clone();
self.edit(|_| {
table[510].set_addr(e510.addr(), e510.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
});
let table = unsafe { &mut *frame_to_page_table(self.2) };
table[510].set_addr(ekernel.addr(), ekernel.flags() | EF::GLOBAL);
table[0x1f8].set_addr(ephysical.addr(), ephysical.flags() | EF::GLOBAL);
table[175].set_addr(estack.addr(), estack.flags() | EF::GLOBAL);
}
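The GLOBAL flag on the kernel, physical-map, and stack slots keeps those TLB entries alive across CR3 switches (given CR4.PGE), so every new address space shares them without re-walking the tables.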
fn token(&self) -> usize {
self.p4_frame.start_address().as_u64() as usize // as CR3
self.2.start_address().as_u64() as usize // as CR3
}
unsafe fn set_token(token: usize) {
@ -237,40 +255,18 @@ impl InactivePageTable for InactivePageTable0 {
fn flush_tlb() {
tlb::flush_all();
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
let target = Cr3::read().0.start_address().as_u64() as usize;
if self.p4_frame == Cr3::read().0 {
return f(&mut active_table());
}
active_table().with_temporary_map(target, |active_table, p4_table: &mut x86PageTable| {
let backup = p4_table[0o777].clone();
// overwrite recursive mapping
p4_table[0o777].set_frame(self.p4_frame.clone(), EF::PRESENT | EF::WRITABLE);
tlb::flush_all();
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p4 table
p4_table[0o777] = backup;
tlb::flush_all();
ret
})
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
info!("PageTable dropping: {:?}", self);
dealloc_frame(self.p4_frame.start_address().as_u64() as usize);
info!("PageTable dropping: {:?}", self.2);
dealloc_frame(self.2.start_address().as_u64() as usize);
}
}
struct FrameAllocatorForX86;
impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
unsafe impl FrameAllocator<Size4KiB> for FrameAllocatorForX86 {
fn allocate_frame(&mut self) -> Option<Frame> {
alloc_frame().map(|addr| Frame::of_addr(addr))
}
@ -284,11 +280,10 @@ impl FrameDeallocator<Size4KiB> for FrameAllocatorForX86 {
/// Flush TLB for `vaddr` on all CPUs
fn flush_tlb_all(vaddr: usize) {
// FIXME: too slow, disabled for now.
return;
if !super::AP_CAN_INIT.load(Ordering::Relaxed) {
return;
}
super::ipi::invoke_on_allcpu(
move || tlb::flush(VirtAddr::new(vaddr as u64)),
false,
);
super::ipi::invoke_on_allcpu(move || tlb::flush(VirtAddr::new(vaddr as u64)), false);
}

@ -8,16 +8,16 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::Volatile;
use crate::arch::consts::PHYSICAL_MEMORY_OFFSET;
use crate::drivers::BlockDriver;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIOBlk {
interrupt_parent: u32,
@ -106,8 +106,6 @@ impl Driver for VirtIOBlkDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
@ -127,9 +125,6 @@ impl Driver for VirtIOBlkDriver {
fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool {
let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req = VirtIOBlkReadReq::default();
req.req_type = VIRTIO_BLK_T_IN;
req.reserved = 0;
@ -155,9 +150,6 @@ impl Driver for VirtIOBlkDriver {
fn write_block(&self, block_id: usize, buf: &[u8]) -> bool {
let mut driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut req: VirtIOBlkWriteReq = unsafe { zeroed() };
req.req_type = VIRTIO_BLK_T_OUT;
req.reserved = 0;
@ -184,8 +176,9 @@ impl Driver for VirtIOBlkDriver {
pub fn virtio_blk_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -199,7 +192,7 @@ pub fn virtio_blk_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
info!("Config: {:?}", config);
info!(
"Found a block device of size {}KB",
@ -213,7 +206,7 @@ pub fn virtio_blk_init(node: &Node) {
let driver = VirtIOBlkDriver(Mutex::new(VirtIOBlk {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
header: vaddr as usize,
queue: VirtIOVirtqueue::new(header, 0, 16),
capacity: config.capacity.read() as usize,
}));

@ -1,12 +1,11 @@
use crate::consts::KERNEL_OFFSET;
use crate::drivers::block::*;
use crate::drivers::net::*;
use crate::drivers::{Driver, DRIVERS, NET_DRIVERS};
use crate::memory::active_table;
use crate::memory::phys_to_virt;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
use pci::*;
use rcore_memory::{paging::PageTable, PAGE_SIZE};
use rcore_memory::PAGE_SIZE;
use spin::Mutex;
const PCI_COMMAND: u16 = 0x04;
@ -141,12 +140,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82574L Gigabit Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize;
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let vaddr = phys_to_virt(addr as usize);
let index = NET_DRIVERS.read().len();
e1000::init(name, irq, vaddr, len as usize, index);
}
@ -155,12 +149,7 @@ pub fn init_driver(dev: &PCIDevice) {
// 82599ES 10-Gigabit SFI/SFP+ Network Connection
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[0] {
let irq = unsafe { enable(dev.loc) };
let vaddr = KERNEL_OFFSET + addr as usize;
let mut current_addr = addr as usize;
while current_addr < addr as usize + len as usize {
active_table().map_if_not_exists(KERNEL_OFFSET + current_addr, current_addr);
current_addr = current_addr + PAGE_SIZE;
}
let vaddr = phys_to_virt(addr as usize);
let index = NET_DRIVERS.read().len();
PCI_DRIVERS.lock().insert(
dev.loc,
@ -173,8 +162,7 @@ pub fn init_driver(dev: &PCIDevice) {
if let Some(BAR::Memory(addr, len, _, _)) = dev.bars[5] {
let irq = unsafe { enable(dev.loc) };
assert!(len as usize <= PAGE_SIZE);
let vaddr = KERNEL_OFFSET + addr as usize;
active_table().map(vaddr, addr as usize);
let vaddr = phys_to_virt(addr as usize);
PCI_DRIVERS
.lock()
.insert(dev.loc, ahci::init(irq, vaddr, len as usize));

@ -8,18 +8,16 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::memory::active_table;
use crate::HEAP_ALLOCATOR;
use super::super::block::virtio_blk;
use super::super::gpu::virtio_gpu;
use super::super::input::virtio_input;
use super::super::net::virtio_net;
use crate::memory::{phys_to_virt, virt_to_phys};
// virtio 4.2.4 Legacy interface
#[repr(C)]
@ -85,10 +83,10 @@ impl VirtIOVirtqueue {
assert_eq!(header.queue_pfn.read(), 0); // not in use
let queue_num_max = header.queue_num_max.read();
assert!(queue_num_max >= queue_num as u32); // queue available
assert!(queue_num & (queue_num - 1) == 0); // power of two
assert_eq!(queue_num & (queue_num - 1), 0); // power of two
let align = PAGE_SIZE;
let size = virtqueue_size(queue_num, align);
assert!(size % align == 0);
assert_eq!(size % align, 0);
// alloc continuous pages
let address =
unsafe { HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, align).unwrap()) }
@ -96,9 +94,7 @@ impl VirtIOVirtqueue {
header.queue_num.write(queue_num as u32);
header.queue_align.write(align as u32);
header
.queue_pfn
.write(((address - KERNEL_OFFSET + MEMORY_OFFSET) as u32) >> 12);
header.queue_pfn.write((virt_to_phys(address) as u32) >> 12);
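// Worked example under the linear-mapping assumption (addresses are
// illustrative): a page-aligned queue at vaddr PHYSICAL_MEMORY_OFFSET +
// 0x8020_0000 translates to paddr 0x8020_0000, so the PFN written here
// is 0x8020_0000 >> 12 = 0x80200.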
// link desc together
let desc =
@ -146,7 +142,7 @@ impl VirtIOVirtqueue {
desc[cur].flags.write(VirtIOVirtqueueFlag::NEXT.bits());
desc[cur]
.addr
.write(output[i].as_ptr() as u64 - KERNEL_OFFSET as u64 + MEMORY_OFFSET as u64);
.write(virt_to_phys(output[i].as_ptr() as usize) as u64);
desc[cur].len.write(output[i].len() as u32);
prev = cur;
cur = desc[cur].next.read() as usize;
@ -157,7 +153,7 @@ impl VirtIOVirtqueue {
.write((VirtIOVirtqueueFlag::NEXT | VirtIOVirtqueueFlag::WRITE).bits());
desc[cur]
.addr
.write(input[i].as_ptr() as u64 - KERNEL_OFFSET as u64 + MEMORY_OFFSET as u64);
.write(virt_to_phys(input[i].as_ptr() as usize) as u64);
desc[cur].len.write(input[i].len() as u32);
prev = cur;
cur = desc[cur].next.read() as usize;
@ -222,7 +218,7 @@ impl VirtIOVirtqueue {
let mut output = Vec::new();
loop {
let flags = VirtIOVirtqueueFlag::from_bits_truncate(desc[cur].flags.read());
let addr = desc[cur].addr.read() as u64 - MEMORY_OFFSET as u64 + KERNEL_OFFSET as u64;
let addr = phys_to_virt(desc[cur].addr.read() as usize);
let buffer =
unsafe { slice::from_raw_parts(addr as *const u8, desc[cur].len.read() as usize) };
if flags.contains(VirtIOVirtqueueFlag::WRITE) {
@ -265,7 +261,7 @@ impl VirtIOVirtqueue {
}
}
pub const VIRTIO_CONFIG_SPACE_OFFSET: u64 = 0x100;
pub const VIRTIO_CONFIG_SPACE_OFFSET: usize = 0x100;
impl VirtIOHeader {
pub fn read_device_features(&mut self) -> u64 {
@ -354,12 +350,13 @@ pub fn virtqueue_used_elem_offset(num: usize, align: usize) -> usize {
pub fn virtio_probe(node: &Node) {
if let Some(reg) = node.prop_raw("reg") {
let from = reg.as_slice().read_be_u64(0).unwrap();
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
debug!("walk dt {:x} {:x}", paddr, vaddr);
let size = reg.as_slice().read_be_u64(8).unwrap();
// assuming one page
assert_eq!(size as usize, PAGE_SIZE);
active_table().map(from as usize, from as usize);
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
let magic = header.magic.read();
let version = header.version.read();
let device_id = header.device_id.read();
@ -374,23 +371,13 @@ pub fn virtio_probe(node: &Node) {
// virtio 3.1.1 Device Initialization
header.status.write(0);
header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits());
if device_id == 1 {
// net device
virtio_net::virtio_net_init(node);
} else if device_id == 2 {
// blk device
virtio_blk::virtio_blk_init(node);
} else if device_id == 16 {
// gpu device
virtio_gpu::virtio_gpu_init(node);
} else if device_id == 18 {
// input device
virtio_input::virtio_input_init(node);
} else {
println!("Unrecognized virtio device {}", device_id);
match device_id {
1 => virtio_net::virtio_net_init(node),
2 => virtio_blk::virtio_blk_init(node),
16 => virtio_gpu::virtio_gpu_init(node),
18 => virtio_input::virtio_input_init(node),
_ => warn!("Unrecognized virtio device {}", device_id),
}
} else {
active_table().unmap(from as usize);
}
}
}

@ -7,19 +7,18 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::arch::cpu;
use crate::memory::active_table;
use crate::memory::virt_to_phys;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS};
use super::test::mandelbrot;
use crate::memory::phys_to_virt;
const VIRTIO_GPU_EVENT_DISPLAY: u32 = 1 << 0;
@ -198,11 +197,6 @@ impl Driver for VirtIOGpuDriver {
let mut driver = self.0.lock();
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = &mut driver.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = driver.header.interrupt_status.read();
if interrupt != 0 {
driver.header.interrupt_ack.write(interrupt);
@ -285,7 +279,7 @@ fn setup_framebuffer(driver: &mut VirtIOGpu) {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
resource_id: VIRTIO_GPU_RESOURCE_ID,
nr_entries: 1,
addr: (frame_buffer - KERNEL_OFFSET + MEMORY_OFFSET) as u64,
addr: virt_to_phys(frame_buffer) as u64,
length: size,
padding: 0,
};
@ -350,8 +344,9 @@ fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
pub fn virtio_gpu_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -365,7 +360,7 @@ pub fn virtio_gpu_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface

@ -16,11 +16,11 @@ use rcore_memory::PAGE_SIZE;
use volatile::Volatile;
use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS};
use crate::memory::phys_to_virt;
struct VirtIOInput {
interrupt_parent: u32,
@ -125,11 +125,6 @@ impl VirtIOInput {
return false;
}
// ensure header page is mapped
// TODO: this should be mapped in all page table by default
let header_addr = self.header as *mut _ as usize;
active_table().map_if_not_exists(header_addr, header_addr);
let interrupt = self.header.interrupt_status.read();
if interrupt != 0 {
self.header.interrupt_ack.write(interrupt);
@ -173,8 +168,9 @@ impl Driver for VirtIOInputDriver {
pub fn virtio_input_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -188,7 +184,7 @@ pub fn virtio_input_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface

@ -18,19 +18,19 @@ use crate::net::SOCKETS;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS, SOCKET_ACTIVITY};
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
const AXI_STREAM_FIFO_ISR: *mut u32 = (KERNEL_OFFSET + 0x1820_0000) as *mut u32;
const AXI_STREAM_FIFO_IER: *mut u32 = (KERNEL_OFFSET + 0x1820_0004) as *mut u32;
const AXI_STREAM_FIFO_TDFR: *mut u32 = (KERNEL_OFFSET + 0x1820_0008) as *mut u32;
const AXI_STREAM_FIFO_TDFD: *mut u32 = (KERNEL_OFFSET + 0x1820_0010) as *mut u32;
const AXI_STREAM_FIFO_TLR: *mut u32 = (KERNEL_OFFSET + 0x1820_0014) as *mut u32;
const AXI_STREAM_FIFO_RDFR: *mut u32 = (KERNEL_OFFSET + 0x1820_0018) as *mut u32;
const AXI_STREAM_FIFO_RDFO: *mut u32 = (KERNEL_OFFSET + 0x1820_001C) as *mut u32;
const AXI_STREAM_FIFO_RDFD: *mut u32 = (KERNEL_OFFSET + 0x1820_0020) as *mut u32;
const AXI_STREAM_FIFO_RLR: *mut u32 = (KERNEL_OFFSET + 0x1820_0024) as *mut u32;
const AXI_STREAM_FIFO_TDR: *mut u32 = (KERNEL_OFFSET + 0x1820_002C) as *mut u32;
const AXI_STREAM_FIFO_RDR: *mut u32 = (KERNEL_OFFSET + 0x1820_0030) as *mut u32;
use crate::memory::phys_to_virt;
const AXI_STREAM_FIFO_ISR: *mut u32 = phys_to_virt(0x1820_0000) as *mut u32;
const AXI_STREAM_FIFO_IER: *mut u32 = phys_to_virt(0x1820_0004) as *mut u32;
const AXI_STREAM_FIFO_TDFR: *mut u32 = phys_to_virt(0x1820_0008) as *mut u32;
const AXI_STREAM_FIFO_TDFD: *mut u32 = phys_to_virt(0x1820_0010) as *mut u32;
const AXI_STREAM_FIFO_TLR: *mut u32 = phys_to_virt(0x1820_0014) as *mut u32;
const AXI_STREAM_FIFO_RDFR: *mut u32 = phys_to_virt(0x1820_0018) as *mut u32;
const AXI_STREAM_FIFO_RDFO: *mut u32 = phys_to_virt(0x1820_001C) as *mut u32;
const AXI_STREAM_FIFO_RDFD: *mut u32 = phys_to_virt(0x1820_0020) as *mut u32;
const AXI_STREAM_FIFO_RLR: *mut u32 = phys_to_virt(0x1820_0024) as *mut u32;
const AXI_STREAM_FIFO_TDR: *mut u32 = phys_to_virt(0x1820_002C) as *mut u32;
const AXI_STREAM_FIFO_RDR: *mut u32 = phys_to_virt(0x1820_0030) as *mut u32;
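Because phys_to_virt is a const fn (see the memory module below), these register pointers can be formed at compile time. A minimal hedged sketch of touching one of them (volatile, since the device owns the memory; RDFO is the receive-FIFO occupancy register):

// Illustrative only: read the receive-FIFO occupancy.
let occupancy = unsafe { AXI_STREAM_FIFO_RDFO.read_volatile() };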
pub struct Router {
buffer: Vec<Vec<u8>>,
@ -219,7 +219,6 @@ pub fn router_init() -> Arc<RouterInterface> {
DRIVERS.write().push(driver.clone());
NET_DRIVERS.write().push(driver.clone());
const AXI_STREAM_FIFO_IER: *mut u32 = (KERNEL_OFFSET + 0x1820_0004) as *mut u32;
// Enable Receive Complete Interrupt
unsafe {
AXI_STREAM_FIFO_IER.write_volatile(1 << 26);

@ -9,7 +9,6 @@ use bitflags::*;
use device_tree::util::SliceRead;
use device_tree::Node;
use log::*;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use smoltcp::phy::{self, DeviceCapabilities};
use smoltcp::time::Instant;
@ -17,12 +16,12 @@ use smoltcp::wire::{EthernetAddress, Ipv4Address};
use smoltcp::Result;
use volatile::{ReadOnly, Volatile};
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::HEAP_ALLOCATOR;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS};
use crate::memory::phys_to_virt;
pub struct VirtIONet {
interrupt_parent: u32,
@ -43,9 +42,6 @@ impl Driver for VirtIONetDriver {
fn try_handle_interrupt(&self, _irq: Option<u32>) -> bool {
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
@ -138,10 +134,6 @@ impl phy::RxToken for VirtIONetRxToken {
{
let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
};
let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
@ -159,10 +151,6 @@ impl phy::TxToken for VirtIONetTxToken {
{
let output = {
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) }
} else {
@ -252,8 +240,9 @@ struct VirtIONetHeader {
pub fn virtio_net_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
let header = unsafe { &mut *(vaddr as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
@ -267,7 +256,8 @@ pub fn virtio_net_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let config =
unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let mac = config.mac;
let status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
debug!("Got MAC address {:?} and status {:?}", mac, status);
@ -280,7 +270,7 @@ pub fn virtio_net_init(node: &Node) {
let mut driver = VirtIONet {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
header: vaddr as usize,
mac: EthernetAddress(mac),
queues: [
VirtIOVirtqueue::new(header, VIRTIO_QUEUE_RECEIVE, queue_num),

@ -1,11 +1,10 @@
use alloc::alloc::{alloc_zeroed, dealloc, Layout};
pub use crate::arch::paging::PageTableImpl;
use isomorphic_drivers::provider;
use rcore_memory::paging::PageTable;
use rcore_memory::PAGE_SIZE;
use crate::memory::active_table;
pub struct Provider;
impl provider::Provider for Provider {
@ -14,7 +13,9 @@ impl provider::Provider for Provider {
fn alloc_dma(size: usize) -> (usize, usize) {
let layout = Layout::from_size_align(size, PAGE_SIZE).unwrap();
let vaddr = unsafe { alloc_zeroed(layout) } as usize;
let paddr = active_table().get_entry(vaddr).unwrap().target();
let mut page_table = unsafe { PageTableImpl::active() };
let paddr = page_table.get_entry(vaddr).unwrap().target();
core::mem::forget(page_table);
(vaddr, paddr)
}

@ -20,7 +20,7 @@ mod pipe;
mod pseudo;
mod stdio;
/// Hard link user programs
// Hard link user programs
#[cfg(feature = "link_user")]
global_asm!(concat!(
r#"

@ -14,9 +14,9 @@
use super::HEAP_ALLOCATOR;
pub use crate::arch::paging::*;
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
use crate::consts::{KERNEL_OFFSET, MEMORY_OFFSET, PHYSICAL_MEMORY_OFFSET};
use crate::process::current_thread;
use crate::sync::{SpinNoIrqLock, MutexGuard, SpinNoIrq};
use crate::sync::{MutexGuard, SpinNoIrq, SpinNoIrqLock};
use alloc::boxed::Box;
use bitmap_allocator::BitAlloc;
use buddy_system_allocator::Heap;
@ -27,7 +27,7 @@ pub use rcore_memory::memory_set::{handler::*, MemoryArea, MemoryAttr};
use rcore_memory::paging::PageTable;
use rcore_memory::*;
pub type MemorySet = rcore_memory::memory_set::MemorySet<InactivePageTable0>;
pub type MemorySet = rcore_memory::memory_set::MemorySet<PageTableImpl>;
// x86_64 support up to 64G memory
#[cfg(target_arch = "x86_64")]
@ -52,19 +52,16 @@ pub type FrameAlloc = bitmap_allocator::BitAlloc4K;
lazy_static! {
pub static ref FRAME_ALLOCATOR: SpinNoIrqLock<FrameAlloc> =
SpinNoIrqLock::new(FrameAlloc::default());
pub static ref ACTIVE_TABLE: SpinNoIrqLock<ActivePageTable> =
SpinNoIrqLock::new(unsafe { ActivePageTable::new() });
}
/// The only way to get current active page table safely
///
/// NOTE:
/// Current implementation of recursive page table has a problem that
/// will cause race condition to the initial page table.
/// So we have to add a global mutex to avoid the racing.
/// This will be removed after replacing recursive mapping by linear mapping.
pub fn active_table() -> MutexGuard<'static, ActivePageTable, SpinNoIrq> {
ACTIVE_TABLE.lock()
/// Convert physical address to virtual address
pub const fn phys_to_virt(paddr: usize) -> usize {
PHYSICAL_MEMORY_OFFSET + paddr
}
/// Convert virtual address to physical address
pub const fn virt_to_phys(vaddr: usize) -> usize {
vaddr - PHYSICAL_MEMORY_OFFSET
}
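A quick sanity sketch of the linear mapping, assuming the two const fns above are in scope (the address is illustrative):

fn main() {
    let paddr = 0xfec0_0000usize; // e.g. the IOAPIC MMIO base
    assert_eq!(virt_to_phys(phys_to_virt(paddr)), paddr);
}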
#[derive(Debug, Clone, Copy)]
@ -148,7 +145,7 @@ pub fn init_heap() {
pub fn enlarge_heap(heap: &mut Heap) {
info!("Enlarging heap to avoid oom");
let mut page_table = active_table();
let mut page_table = unsafe { PageTableImpl::active() };
let mut addrs = [(0, 0); 32];
let mut addr_len = 0;
#[cfg(target_arch = "x86_64")]
@ -178,4 +175,5 @@ pub fn enlarge_heap(heap: &mut Heap) {
heap.init(*addr, *len);
}
}
core::mem::forget(page_table);
}

@ -20,9 +20,9 @@ use crate::memory::{
use crate::sync::{Condvar, SpinNoIrqLock as Mutex};
use super::abi::{self, ProcInitInfo};
use crate::processor;
use core::mem::uninitialized;
use rcore_fs::vfs::INode;
use crate::processor;
pub struct Thread {
context: Context,
@ -76,8 +76,8 @@ pub struct Process {
pub child_exit_code: BTreeMap<usize, usize>, // child processes store their exit codes here
}
/// Records the mapping between pid and Process struct.
lazy_static! {
/// Records the mapping between pid and Process struct.
pub static ref PROCESSES: RwLock<BTreeMap<usize, Weak<Mutex<Process>>>> =
RwLock::new(BTreeMap::new());
}

@ -79,7 +79,7 @@ impl Syscall<'_> {
}
}
/// should be initialized together
// should be initialized together
lazy_static! {
pub static ref EPOCH_BASE: u64 = crate::arch::timer::read_epoch();
pub static ref TICK_BASE: u64 = unsafe { crate::trap::TICK as u64 };
