[WIP] update rv32 linear mapping

master
Jiajie Chen 6 years ago
parent d66df43ec0
commit cf6a7746f4

@@ -65,8 +65,3 @@ boot_page_table_sv32:
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252
.zero 4 * 256
.align 12 # page align
.global _root_page_table_ptr
_root_page_table_ptr:
.space 4 # 4 bytes

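For reference, each `.word (ppn << 10) | flags` above is an Sv32 leaf PTE: the low 10 bits are flags (0xcf = V|R|W|X|A|D) and the physical page number `pa >> 12` sits in bits 10..32. A minimal Rust sketch of the encoding (illustrative only, not part of this commit):

const V: u32 = 1 << 0; // valid
const R: u32 = 1 << 1; // readable
const W: u32 = 1 << 2; // writable
const X: u32 = 1 << 3; // executable
const A: u32 = 1 << 6; // accessed
const D: u32 = 1 << 7; // dirty

const fn sv32_leaf_pte(pa: u32) -> u32 {
    // ppn = pa >> 12, placed in bits 10..32; VRWXAD marks a mapped, RWX megapage
    ((pa >> 12) << 10) | V | R | W | X | A | D
}

// Reproduces the hand-written entry for the 4 MiB page at PA 0x80C0_0000:
const _: () = assert!(sv32_leaf_pte(0x80C0_0000) == (0x80c00 << 10) | 0xcf);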
@@ -1,28 +1,11 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
#[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0o774;
// Under riscv64, upon booting, paging is enabled by bbl and
// root_table[0777] maps to p3_table,
// and p3_table[0777] maps to gigapage 8000_0000H,
// so 0xFFFF_FFFF_8000_0000 maps to 0x8000_0000
// root_table[0774] points to root_table itself as page table
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
// Linear mapping
pub const LINEAR_OFFSET: usize = 0x4000_0000;
pub const PHYSICAL_MEMORY_OFFSET: usize = 0x4000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_OFFSET: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_OFFSET: usize = 0xFFFF_FFFF_C000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_P2_INDEX: usize = (KERNEL_OFFSET >> 12 >> 10) & 0x3ff;
#[cfg(target_arch = "riscv64")]
pub const KERNEL_P4_INDEX: usize = (KERNEL_OFFSET >> 12 >> 9 >> 9 >> 9) & 0o777;
#[cfg(feature = "board_k210")]
pub const KERNEL_HEAP_SIZE: usize = 0x0020_0000;
#[cfg(not(feature = "board_k210"))]

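A note on these constants: with PHYSICAL_MEMORY_OFFSET = 0x4000_0000, the `phys_to_virt` helper imported elsewhere in this diff presumably reduces to plain offset arithmetic over the linear mapping. A minimal sketch, assuming the helpers in crate::memory look like this:

pub fn phys_to_virt(paddr: usize) -> usize {
    // linear mapping: all of physical memory is visible at a fixed offset
    paddr + PHYSICAL_MEMORY_OFFSET
}

pub fn virt_to_phys(vaddr: usize) -> usize {
    vaddr - PHYSICAL_MEMORY_OFFSET
}

Under that scheme the THINPAD RAM at PA 0x8000_0000 lands at VA 0xC000_0000, which is exactly KERNEL_OFFSET on riscv32.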
@@ -1,12 +1,10 @@
use crate::consts::LINEAR_OFFSET;
use crate::consts::PHYSICAL_MEMORY_OFFSET;
#[cfg(target_arch = "riscv64")]
use crate::consts::RECURSIVE_INDEX;
// Depends on kernel
#[cfg(target_arch = "riscv32")]
use crate::consts::KERNEL_P2_INDEX;
#[cfg(target_arch = "riscv64")]
use crate::consts::KERNEL_P4_INDEX;
use crate::memory::{active_table, alloc_frame, dealloc_frame};
use crate::memory::{alloc_frame, dealloc_frame, phys_to_virt};
use log::*;
use rcore_memory::paging::*;
use riscv::addr::*;
@@ -14,31 +12,28 @@ use riscv::asm::{sfence_vma, sfence_vma_all};
use riscv::paging::{FrameAllocator, FrameDeallocator};
use riscv::paging::{
Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, PageTableType,
RecursivePageTable, TwoLevelPageTable
RecursivePageTable, TwoLevelPageTable,
};
use riscv::register::satp;
#[cfg(target_arch = "riscv32")]
pub struct ActivePageTable(usize, PageEntry);
#[cfg(target_arch = "riscv64")]
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
pub struct PageTableImpl {
page_table: TwoLevelPageTable<'static>,
root_frame: Frame,
entry: PageEntry,
}
/// PageTableEntry: the contents of this entry.
/// Page: this entry is the pte of page `Page`.
pub struct PageEntry(&'static mut PageTableEntry, Page);
impl PageTable for ActivePageTable {
impl PageTable for PageTableImpl {
fn map(&mut self, addr: usize, target: usize) -> &mut Entry {
// use riscv::paging::Mapper::map_to,
// map the 4K `page` to the 4K `frame` with `flags`
let flags = EF::VALID | EF::READABLE | EF::WRITABLE;
let page = Page::of_addr(VirtAddr::new(addr));
let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv
// we may need the frame allocator to allocate frames for a new page table (first/second level)
info!("map");
self.get_table()
self.page_table
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap()
.flush();
@@ -47,89 +42,28 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.get_table().unmap(page).unwrap();
let (_, flush) = self.page_table.unmap(page).unwrap();
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.get_table().ref_entry(page.clone()) {
if let Ok(e) = self.page_table.ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
self.entry = PageEntry(e, page);
Some(&mut self.entry as &mut Entry)
} else {
None
}
}
}
extern "C" {
fn _root_page_table_ptr();
}
pub fn set_root_page_table_ptr(ptr: usize) {
unsafe {
sfence_vma_all();
*(_root_page_table_ptr as *mut usize) = ptr;
}
}
pub fn get_root_page_table_ptr() -> usize {
unsafe { *(_root_page_table_ptr as *mut usize) }
}
pub fn root_page_table_buffer() -> &'static mut RvPageTable {
unsafe { &mut *(_root_page_table_ptr as *mut RvPageTable) }
}
impl PageTableExt for ActivePageTable {}
static mut __page_table_with_mode: bool = false;
/// The virtual address of root page table
#[cfg(all(target_arch = "riscv64", feature = "sv39"))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (0o777 << 12 << 9 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", not(feature = "sv39")))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (RECURSIVE_INDEX << 12 << 9 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9 << 9)
| (RECURSIVE_INDEX << 12 << 9)
| ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
impl ActivePageTable {
#[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self {
ActivePageTable(
get_root_page_table_ptr(),
::core::mem::uninitialized(),
)
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn new() -> Self {
#[cfg(feature = "sv39")]
let type_ = PageTableType::Sv39;
#[cfg(not(feature = "sv39"))]
let type_ = PageTableType::Sv48;
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE, type_).unwrap(),
::core::mem::uninitialized(),
)
}
unsafe fn get_raw_table(&mut self) -> *mut RvPageTable {
if __page_table_with_mode {
get_root_page_table_ptr() as *mut RvPageTable
} else {
self.0 as *mut RvPageTable
}
}
fn get_table(&mut self) -> TwoLevelPageTable<'static> {
unsafe { TwoLevelPageTable::new(&mut *self.get_raw_table(), LINEAR_OFFSET) }
fn get_page_slice_mut<'a>(&mut self, addr: usize) -> &'a mut [u8] {
let frame = self
.page_table
.translate_page(Page::of_addr(VirtAddr::new(addr)))
.unwrap();
let vaddr = frame.start_address().as_usize() + PHYSICAL_MEMORY_OFFSET;
unsafe { core::slice::from_raw_parts_mut(vaddr as *mut u8, 0x1000) }
}
}
@@ -210,66 +144,50 @@ impl Entry for PageEntry {
fn set_mmio(&mut self, _value: u8) {}
}
#[derive(Debug)]
pub struct InactivePageTable0 {
root_frame: Frame,
impl PageTableImpl {
/// Unsafely get the current active page table.
/// WARN: You MUST call `core::mem::forget` for it after use!
pub unsafe fn active() -> Self {
let frame = Frame::of_ppn(PageTableImpl::active_token() & 0x7fffffff);
let table = frame.as_kernel_mut(PHYSICAL_MEMORY_OFFSET);
PageTableImpl {
page_table: TwoLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
}
}
impl InactivePageTable for InactivePageTable0 {
type Active = ActivePageTable;
impl PageTableExt for PageTableImpl {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
#[cfg(arch = "riscv32")]
unsafe {
let table = unsafe { &mut *(target as *mut RvPageTable) };
let table = unsafe { &mut *(phys_to_virt(target) as *mut RvPageTable) };
table.zero();
PageTableImpl {
page_table: TwoLevelPageTable::new(table, PHYSICAL_MEMORY_OFFSET),
root_frame: frame,
entry: unsafe { core::mem::uninitialized() },
}
#[cfg(arch = "riscv64")]
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
});
InactivePageTable0 { root_frame: frame }
}
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
info!("mapping kernel linear mapping");
let table: &mut RvPageTable = unsafe { self.root_frame.as_kernel_mut(LINEAR_OFFSET)};
let table = unsafe {
&mut *(phys_to_virt(self.root_frame.start_address().as_usize()) as *mut RvPageTable)
};
for i in 256..1024 {
let flags = EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE;
let frame = Frame::of_addr(PhysAddr::new((i - 256) << 22));
let flags =
EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE | EF::ACCESSED | EF::DIRTY;
let frame = Frame::of_addr(PhysAddr::new((i << 22) - PHYSICAL_MEMORY_OFFSET));
table[i].set(frame, flags);
}
}
#[cfg(target_arch = "riscv64")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
let e1 = table[KERNEL_P4_INDEX];
assert!(!e1.is_unused());
self.edit(|_| {
table[KERNEL_P4_INDEX] = e1;
});
}
#[cfg(target_arch = "riscv32")]
fn token(&self) -> usize {
self.root_frame.number() | (1 << 31) // as satp
}
#[cfg(target_arch = "riscv64")]
fn token(&self) -> usize {
use bit_field::BitField;
let mut satp = self.root_frame.number();
satp.set_bits(44..60, 0); // AS is 0
#[cfg(feature = "sv39")]
satp.set_bits(60..64, satp::Mode::Sv39 as usize);
#[cfg(not(feature = "sv39"))]
satp.set_bits(60..64, satp::Mode::Sv48 as usize);
satp
self.root_frame.number() | (1 << 31)
}
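The rv32 `token()` above builds an Sv32 satp value by hand. A sketch of the layout it relies on, for reference:

// satp, Sv32 flavour:
//   bit  31     MODE (0 = Bare, 1 = Sv32)
//   bits 30..22 ASID (left as 0 here)
//   bits 21..0  PPN of the root page table
fn sv32_satp(root_ppn: usize) -> usize {
    assert!(root_ppn < (1 << 22)); // PPN must fit in 22 bits
    (1 << 31) | root_ppn
}

So `self.root_frame.number() | (1 << 31)` is simply "Sv32 mode + root PPN".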
unsafe fn set_token(token: usize) {
@@ -277,61 +195,22 @@ impl InactivePageTable for InactivePageTable0 {
}
fn active_token() -> usize {
satp::read().bits()
}
fn flush_tlb() {
let mut token: usize = 0;
unsafe {
sfence_vma_all();
}
asm!("csrr $0, satp" : "=r"(token) ::: "volatile");
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
#[cfg(target_arch = "riscv32")]
{
debug!(
"edit table {:x?} -> {:x?}",
Self::active_token(),
self.token()
);
let mut active = unsafe { ActivePageTable(self.token(), ::core::mem::uninitialized()) };
let ret = f(&mut active);
debug!("finish table");
Self::flush_tlb();
ret
token
}
#[cfg(target_arch = "riscv64")]
{
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
// overwrite recursive mapping
root_table[RECURSIVE_INDEX].set(self.root_frame.clone(), EF::VALID);
unsafe {
sfence_vma_all();
}
// execute f in the new context
let ret = f(active_table);
// restore recursive mapping to original p2 table
root_table[RECURSIVE_INDEX] = backup;
fn flush_tlb() {
debug!("flushing token {:x}", Self::active_token());
unsafe {
sfence_vma_all();
}
ret
})
}
}
}
impl Drop for InactivePageTable0 {
impl Drop for PageTableImpl {
fn drop(&mut self) {
dealloc_frame(self.root_frame.start_address().as_usize());
}
@@ -350,14 +229,3 @@ impl FrameDeallocator for FrameAllocatorForRiscv {
dealloc_frame(frame.start_address().as_usize());
}
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn setup_recursive_mapping() {
let frame = satp::read().frame();
let root_page_table = unsafe { &mut *(frame.start_address().as_usize() as *mut RvPageTable) };
root_page_table.set_recursive(RECURSIVE_INDEX, frame);
unsafe {
sfence_vma_all();
}
info!("setup recursive mapping end");
}

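The rv32 `map_kernel` above fills root-table entries 256..1024 so that megapage i covers VA [i << 22, (i + 1) << 22) and points at PA (i << 22) - PHYSICAL_MEMORY_OFFSET, i.e. a linear map with VA = PA + 0x4000_0000. A small sanity-check sketch of that arithmetic:

const PHYSICAL_MEMORY_OFFSET: usize = 0x4000_0000;

// (start VA, start PA) of the 4 MiB megapage behind root-table entry `i`
fn linear_target(i: usize) -> (usize, usize) {
    let va = i << 22;
    (va, va - PHYSICAL_MEMORY_OFFSET)
}

fn main() {
    // first mapped entry: VA 0x4000_0000 -> PA 0
    assert_eq!(linear_target(256), (0x4000_0000, 0x0));
    // kernel image: VA 0xC000_0000 (KERNEL_OFFSET) -> PA 0x8000_0000
    assert_eq!(linear_target(0xC000_0000 >> 22), (0xC000_0000, 0x8000_0000));
}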
@@ -11,9 +11,9 @@ use log::*;
use rcore_memory::PAGE_SIZE;
use volatile::Volatile;
use crate::arch::consts::PHYSICAL_MEMORY_OFFSET;
use crate::drivers::BlockDriver;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::arch::consts::LINEAR_OFFSET;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS};
@@ -125,10 +125,6 @@ impl Driver for VirtIOBlkDriver {
fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool {
let mut driver = self.0.lock();
<<<<<<< HEAD
=======
>>>>>>> [WIP] rv32 linear mapping
let mut req = VirtIOBlkReadReq::default();
req.req_type = VIRTIO_BLK_T_IN;
req.reserved = 0;
@@ -154,10 +150,6 @@ impl Driver for VirtIOBlkDriver {
fn write_block(&self, block_id: usize, buf: &[u8]) -> bool {
let mut driver = self.0.lock();
<<<<<<< HEAD
=======
>>>>>>> [WIP] rv32 linear mapping
let mut req: VirtIOBlkWriteReq = unsafe { zeroed() };
req.req_type = VIRTIO_BLK_T_OUT;
req.reserved = 0;

@@ -11,11 +11,7 @@ use log::*;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
<<<<<<< HEAD
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET};
=======
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET, LINEAR_OFFSET};
>>>>>>> [WIP] rv32 linear mapping
use crate::HEAP_ALLOCATOR;
use super::super::block::virtio_blk;
@@ -359,6 +355,7 @@ pub fn virtio_probe(node: &Node) {
if let Some(reg) = node.prop_raw("reg") {
let paddr = reg.as_slice().read_be_u64(0).unwrap();
let vaddr = phys_to_virt(paddr as usize);
debug!("walk dt {:x} {:x}", paddr, vaddr);
let size = reg.as_slice().read_be_u64(8).unwrap();
// assuming one page
assert_eq!(size as usize, PAGE_SIZE);

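In `virtio_probe` above, the device tree's `reg` property is decoded as a big-endian u64 address followed by a big-endian u64 size (hence read_be_u64(0) and read_be_u64(8)), and the address goes through phys_to_virt before the MMIO header is touched. A standalone sketch of that decoding (parse_reg is a made-up name, not the device_tree crate's API):

fn parse_reg(raw: &[u8]) -> (u64, u64) {
    // both cells are big-endian in the flattened device tree
    let addr = u64::from_be_bytes(raw[0..8].try_into().unwrap());
    let size = u64::from_be_bytes(raw[8..16].try_into().unwrap());
    (addr, size)
}

fn main() {
    // e.g. a virtio-mmio slot at PA 0x1000_1000 with a one-page (0x1000) window
    let raw = [
        0, 0, 0, 0, 0x10, 0, 0x10, 0, // address 0x1000_1000
        0, 0, 0, 0, 0, 0, 0x10, 0, // size 0x1000
    ];
    assert_eq!(parse_reg(&raw), (0x1000_1000, 0x1000));
}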
@@ -134,10 +134,6 @@ impl phy::RxToken for VirtIONetRxToken {
{
let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock();
<<<<<<< HEAD
=======
>>>>>>> [WIP] rv32 linear mapping
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
};
let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
@@ -155,10 +151,6 @@ impl phy::TxToken for VirtIONetTxToken {
{
let output = {
let mut driver = (self.0).0.lock();
<<<<<<< HEAD
=======
>>>>>>> [WIP] rv32 linear mapping
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) }
} else {
@@ -264,7 +256,8 @@ pub fn virtio_net_init(node: &Node) {
header.write_driver_features(driver_features);
// read configuration space
let config = unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let config =
unsafe { &mut *((vaddr + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let mac = config.mac;
let status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
debug!("Got MAC address {:?} and status {:?}", mac, status);
