[WIP] rv32 linear mapping

master
Jiajie Chen 6 years ago
parent c8262f936f
commit d66df43ec0

kernel/Cargo.lock generated

@@ -450,7 +450,7 @@ dependencies = [
[[package]]
name = "riscv"
version = "0.5.0"
source = "git+https://github.com/rcore-os/riscv#e8be9f93513225596709a2dccd9064324591fc3c"
source = "git+https://github.com/rcore-os/riscv#58b3c27b455bed03547cb6112a2f1479e4f4f5ee"
dependencies = [
"bare-metal 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"bit_field 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",

@@ -12,11 +12,27 @@ _start:
lui sp, %hi(bootstack)
add sp, sp, t0
# 2. enable paging
# 2. paging
# satp = (1 << 31) | PPN(boot_page_table_sv32)
lui t0, %hi(boot_page_table_sv32)
li t1, 0xc0000000 - 0x80000000
sub t0, t0, t1
# 2.1 linear mapping (0xc0000000 -> 0x80000000)
li t2, 768*4
li t4, 0x400 << 10
li t5, 4
add t1, t0, t2
li t6, 1024*4
add t6, t0, t6
li t3, (0x80000 << 10) | 0xcf # VRWXAD
loop:
sw t3, 0(t1)
add t3, t3, t4
add t1, t1, t5
bne t1, t6, loop
# 2.2 enable paging
srli t0, t0, 12
li t1, 1 << 31
or t0, t0, t1
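For reference, a minimal Rust sketch (editorial, not part of the commit; the helper name is made up) of what the "2.1 linear mapping" loop above leaves in boot_page_table_sv32: entries 768..1024 become 4 MiB Sv32 superpages mapping 0xC0000000 + x onto 0x80000000 + x.

const PTE_VRWXAD: u32 = 0xcf; // V | R | W | X | A | D, matching the assembly above

// Hypothetical helper, not from the commit: what the boot loop writes.
fn fill_boot_linear_mapping(table: &mut [u32; 1024]) {
    // First 4 MiB superpage: PPN 0x80000 (physical 0x8000_0000) in bits 31..10.
    let mut pte: u32 = (0x80000 << 10) | PTE_VRWXAD;
    for entry in table[768..1024].iter_mut() {
        *entry = pte;
        pte += 0x400 << 10; // next superpage: advance the PPN by 0x400 pages (4 MiB)
    }
}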
@@ -41,15 +57,16 @@ bootstacktop:
boot_page_table_sv32:
# NOTE: assume kernel image < 16M
# 0x80000000 -> 0x80000000 (4M * 4)
# 0xc0000000 -> 0x80000000 (4M * 4)
# 0xc0000000 -> 0x80000000 (mapped in code above)
.zero 4 * 512
.word (0x80000 << 10) | 0xcf # VRWXAD
.word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252
.word (0x80000 << 10) | 0xcf # VRWXAD
.word (0x80400 << 10) | 0xcf # VRWXAD
.word (0x80800 << 10) | 0xcf # VRWXAD
.word (0x80c00 << 10) | 0xcf # VRWXAD
.zero 4 * 252
.zero 4 * 256
.align 12 # page align
.global _root_page_table_ptr
_root_page_table_ptr:
.space 4 # 4bytes
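The entry indices above follow from Sv32's first-level lookup (top 10 bits of the virtual address, 4 MiB per entry); a small illustrative helper, not in the commit, showing why the identity window sits at entry 512 and the linear mapping starts at entry 768:

fn vpn1(vaddr: u32) -> usize {
    (vaddr >> 22) as usize // Sv32 VPN[1]: first-level index, 4 MiB per entry
}

fn check_boot_table_indices() {
    assert_eq!(vpn1(0x8000_0000), 512);  // identity-mapped window kept in the table above
    assert_eq!(vpn1(0xC000_0000), 768);  // start of the linear mapping written in _start
    assert_eq!(vpn1(0xFFFF_FFFF), 1023); // last entry, where the boot loop stops
}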

@@ -1,7 +1,5 @@
// Physical address available on THINPAD:
// [0x80000000, 0x80800000]
#[cfg(target_arch = "riscv32")]
pub const RECURSIVE_INDEX: usize = 0x3fd;
#[cfg(target_arch = "riscv64")]
pub const RECURSIVE_INDEX: usize = 0o774;
// Under riscv64, upon booting, paging is enabled by bbl and
@@ -12,6 +10,9 @@ pub const RECURSIVE_INDEX: usize = 0o774;
// root_table[0775] points to root_table itself as leaf page
// root_table[0776] points to a temp page table as leaf page
// Linear mapping
pub const LINEAR_OFFSET: usize = 0x4000_0000;
#[cfg(target_arch = "riscv32")]
pub const KERNEL_OFFSET: usize = 0xC000_0000;
#[cfg(target_arch = "riscv64")]
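LINEAR_OFFSET is simply KERNEL_OFFSET - MEMORY_OFFSET (0xC000_0000 - 0x8000_0000); a sketch of the translations it enables (helper names are illustrative, not from the commit):

const LINEAR_OFFSET: usize = 0x4000_0000; // KERNEL_OFFSET - MEMORY_OFFSET

fn phys_to_virt(paddr: usize) -> usize {
    paddr + LINEAR_OFFSET // e.g. 0x8040_0000 -> 0xC040_0000
}

fn virt_to_phys(vaddr: usize) -> usize {
    vaddr - LINEAR_OFFSET
}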

@@ -18,6 +18,7 @@ pub fn init(dtb: usize) {
init_frame_allocator();
init_heap();
// remap the kernel use 4K page
#[cfg(target_arch = "riscv64")]
unsafe {
super::paging::setup_recursive_mapping();
}
@@ -54,6 +55,21 @@ fn init_frame_allocator() {
}
/// Remap the kernel memory address with 4K page recorded in p1 page table
#[cfg(target_arch = "riscv32")]
fn remap_the_kernel(_dtb: usize) {
let mut ms = MemorySet::new();
unsafe {
ms.activate();
}
unsafe {
SATP = ms.token();
}
mem::forget(ms);
info!("remap kernel end");
}
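The rv32 path can stay this small because the boot page table already contains the linear mapping; the token stored into SATP is the Sv32 satp value. A hedged sketch of that value, assuming the layout stated in the _start comment (satp = (1 << 31) | PPN) rather than copying the MemorySet implementation:

fn sv32_satp_token(root_table_paddr: usize) -> usize {
    (1 << 31) | (root_table_paddr >> 12) // MODE = Sv32 in bit 31, root PPN below
}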
/// Remap the kernel memory address with 4K page recorded in p1 page table
#[cfg(target_arch = "riscv64")]
fn remap_the_kernel(dtb: usize) {
let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
let mut ms = MemorySet::new_bare();

@ -1,3 +1,5 @@
use crate::consts::LINEAR_OFFSET;
#[cfg(target_arch = "riscv64")]
use crate::consts::RECURSIVE_INDEX;
// Depends on kernel
#[cfg(target_arch = "riscv32")]
@@ -12,10 +14,14 @@ use riscv::asm::{sfence_vma, sfence_vma_all};
use riscv::paging::{FrameAllocator, FrameDeallocator};
use riscv::paging::{
Mapper, PageTable as RvPageTable, PageTableEntry, PageTableFlags as EF, PageTableType,
RecursivePageTable,
RecursivePageTable, TwoLevelPageTable
};
use riscv::register::satp;
#[cfg(target_arch = "riscv32")]
pub struct ActivePageTable(usize, PageEntry);
#[cfg(target_arch = "riscv64")]
pub struct ActivePageTable(RecursivePageTable<'static>, PageEntry);
/// PageTableEntry: the contents of this entry.
@@ -31,7 +37,8 @@ impl PageTable for ActivePageTable {
let frame = Frame::of_addr(PhysAddr::new(target));
// map the page to the frame using FrameAllocatorForRiscv
// we may need frame allocator to alloc frame for new page table(first/second)
self.0
info!("map");
self.get_table()
.map_to(page, frame, flags, &mut FrameAllocatorForRiscv)
.unwrap()
.flush();
@@ -40,13 +47,13 @@ impl PageTable for ActivePageTable {
fn unmap(&mut self, addr: usize) {
let page = Page::of_addr(VirtAddr::new(addr));
let (_, flush) = self.0.unmap(page).unwrap();
let (_, flush) = self.get_table().unmap(page).unwrap();
flush.flush();
}
fn get_entry(&mut self, vaddr: usize) -> Option<&mut Entry> {
let page = Page::of_addr(VirtAddr::new(vaddr));
if let Ok(e) = self.0.ref_entry(page.clone()) {
if let Ok(e) = self.get_table().ref_entry(page.clone()) {
let e = unsafe { &mut *(e as *mut PageTableEntry) };
self.1 = PageEntry(e, page);
Some(&mut self.1 as &mut Entry)
@@ -56,12 +63,30 @@
}
}
extern "C" {
fn _root_page_table_ptr();
}
pub fn set_root_page_table_ptr(ptr: usize) {
unsafe {
sfence_vma_all();
*(_root_page_table_ptr as *mut usize) = ptr;
}
}
pub fn get_root_page_table_ptr() -> usize {
unsafe { *(_root_page_table_ptr as *mut usize) }
}
pub fn root_page_table_buffer() -> &'static mut RvPageTable {
unsafe { &mut *(_root_page_table_ptr as *mut RvPageTable) }
}
impl PageTableExt for ActivePageTable {}
static mut __page_table_with_mode: bool = false;
/// The virtual address of root page table
#[cfg(target_arch = "riscv32")]
const ROOT_PAGE_TABLE: *mut RvPageTable =
((RECURSIVE_INDEX << 12 << 10) | ((RECURSIVE_INDEX + 1) << 12)) as *mut RvPageTable;
#[cfg(all(target_arch = "riscv64", feature = "sv39"))]
const ROOT_PAGE_TABLE: *mut RvPageTable = ((0xFFFF_0000_0000_0000)
| (0o777 << 12 << 9 << 9 << 9)
@@ -79,7 +104,7 @@ impl ActivePageTable {
#[cfg(target_arch = "riscv32")]
pub unsafe fn new() -> Self {
ActivePageTable(
RecursivePageTable::new(&mut *ROOT_PAGE_TABLE).unwrap(),
get_root_page_table_ptr(),
::core::mem::uninitialized(),
)
}
@@ -94,6 +119,18 @@ impl ActivePageTable {
::core::mem::uninitialized(),
)
}
unsafe fn get_raw_table(&mut self) -> *mut RvPageTable {
if __page_table_with_mode {
get_root_page_table_ptr() as *mut RvPageTable
} else {
self.0 as *mut RvPageTable
}
}
fn get_table(&mut self) -> TwoLevelPageTable<'static> {
unsafe { TwoLevelPageTable::new(&mut *self.get_raw_table(), LINEAR_OFFSET) }
}
}
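The reason get_table hands LINEAR_OFFSET to TwoLevelPageTable: during a walk the first-level entry only gives a physical frame number, and under the linear mapping the kernel reaches that second-level table by plain offset arithmetic. A rough sketch of the idea (assumed behaviour, not the riscv crate's actual code):

fn next_table_vaddr(p2_entry_ppn: usize, linear_offset: usize) -> usize {
    let paddr = p2_entry_ppn << 12; // physical address of the second-level table
    paddr + linear_offset           // virtual address the kernel can actually dereference
}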
/// implementation for the Entry trait in /crate/memory/src/paging/mod.rs
@@ -184,6 +221,12 @@ impl InactivePageTable for InactivePageTable0 {
fn new_bare() -> Self {
let target = alloc_frame().expect("failed to allocate frame");
let frame = Frame::of_addr(PhysAddr::new(target));
#[cfg(target_arch = "riscv32")]
unsafe {
let table = &mut *(target as *mut RvPageTable);
table.zero();
}
#[cfg(target_arch = "riscv64")]
active_table().with_temporary_map(target, |_, table: &mut RvPageTable| {
table.zero();
table.set_recursive(RECURSIVE_INDEX, frame.clone());
@@ -193,25 +236,13 @@ impl InactivePageTable for InactivePageTable0 {
#[cfg(target_arch = "riscv32")]
fn map_kernel(&mut self) {
let table = unsafe { &mut *ROOT_PAGE_TABLE };
extern "C" {
fn start();
fn end();
}
let mut entrys: [PageTableEntry; 256] = unsafe { core::mem::uninitialized() };
let entry_start = start as usize >> 22;
let entry_end = (end as usize >> 22) + 1;
let entry_count = entry_end - entry_start;
for i in 0..entry_count {
entrys[i] = table[entry_start + i];
}
self.edit(|_| {
// NOTE: 'table' now refers to new page table
for i in 0..entry_count {
table[entry_start + i] = entrys[i];
info!("mapping kernel linear mapping");
let table: &mut RvPageTable = unsafe { self.root_frame.as_kernel_mut(LINEAR_OFFSET)};
for i in 256..1024 {
let flags = EF::VALID | EF::READABLE | EF::WRITABLE | EF::EXECUTABLE;
let frame = Frame::of_addr(PhysAddr::new((i - 256) << 22));
table[i].set(frame, flags);
}
});
}
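A quick illustrative check (not in the commit) of the loop's index arithmetic: first-level entry i covers virtual address i << 22 and points at physical (i - 256) << 22, so the whole range satisfies virt = phys + LINEAR_OFFSET:

fn check_map_kernel_entry(i: usize) {
    assert!((256..1024).contains(&i));
    assert_eq!(i << 22, ((i - 256) << 22) + 0x4000_0000); // 0x4000_0000 == LINEAR_OFFSET
}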
#[cfg(target_arch = "riscv64")]
@@ -256,6 +287,24 @@ impl InactivePageTable for InactivePageTable0 {
}
fn edit<T>(&mut self, f: impl FnOnce(&mut Self::Active) -> T) -> T {
#[cfg(target_arch = "riscv32")]
{
debug!(
"edit table {:x?} -> {:x?}",
Self::active_token(),
self.token()
);
let mut active = unsafe { ActivePageTable(self.token(), ::core::mem::uninitialized()) };
let ret = f(&mut active);
debug!("finish table");
Self::flush_tlb();
ret
}
#[cfg(target_arch = "riscv64")]
{
let target = satp::read().frame().start_address().as_usize();
active_table().with_temporary_map(target, |active_table, root_table: &mut RvPageTable| {
let backup = root_table[RECURSIVE_INDEX].clone();
@@ -277,6 +326,8 @@ impl InactivePageTable for InactivePageTable0 {
ret
})
}
}
}
@@ -300,6 +351,7 @@ impl FrameDeallocator for FrameAllocatorForRiscv {
}
}
#[cfg(target_arch = "riscv64")]
pub unsafe fn setup_recursive_mapping() {
let frame = satp::read().frame();
let root_page_table = unsafe { &mut *(frame.start_address().as_usize() as *mut RvPageTable) };

@@ -13,6 +13,7 @@ use volatile::Volatile;
use crate::drivers::BlockDriver;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::arch::consts::LINEAR_OFFSET;
use super::super::bus::virtio_mmio::*;
use super::super::{DeviceType, Driver, BLK_DRIVERS, DRIVERS};
@@ -124,6 +125,10 @@ impl Driver for VirtIOBlkDriver {
fn read_block(&self, block_id: usize, buf: &mut [u8]) -> bool {
let mut driver = self.0.lock();
let mut req = VirtIOBlkReadReq::default();
req.req_type = VIRTIO_BLK_T_IN;
req.reserved = 0;
@@ -149,6 +154,10 @@ impl Driver for VirtIOBlkDriver {
fn write_block(&self, block_id: usize, buf: &[u8]) -> bool {
let mut driver = self.0.lock();
let mut req: VirtIOBlkWriteReq = unsafe { zeroed() };
req.req_type = VIRTIO_BLK_T_OUT;
req.reserved = 0;

@@ -11,7 +11,11 @@ use log::*;
use rcore_memory::PAGE_SIZE;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::consts::{KERNEL_OFFSET, MEMORY_OFFSET, LINEAR_OFFSET};
use crate::HEAP_ALLOCATOR;
use super::super::block::virtio_blk;

@@ -134,6 +134,10 @@ impl phy::RxToken for VirtIONetRxToken {
{
let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock();
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
};
let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
@@ -151,6 +155,10 @@ impl phy::TxToken for VirtIONetTxToken {
{
let output = {
let mut driver = (self.0).0.lock();
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len()) }
} else {
