remove TLS setup code. fix check writable memory

Branch: toolchain_update
Author: WangRunji
parent 84e07a6d83
commit f7f740a021

@@ -2,7 +2,6 @@ use super::*;
 #[derive(Debug, Clone)]
 pub struct ByFrame<T: FrameAllocator> {
-    flags: MemoryAttr,
     allocator: T,
 }
@@ -11,9 +10,11 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
         Box::new(self.clone())
     }
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = self.allocator.alloc().expect("failed to allocate frame");
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -28,7 +29,7 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
     }
 impl<T: FrameAllocator> ByFrame<T> {
-    pub fn new(flags: MemoryAttr, allocator: T) -> Self {
-        ByFrame { flags, allocator }
+    pub fn new(allocator: T) -> Self {
+        ByFrame { allocator }
     }
 }
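ByFrame is the eager strategy: every page in the area gets a physical frame at map time, and map() panics via expect() when the allocator runs dry. A minimal, self-contained sketch of the FrameAllocator contract it leans on (toy bump allocator with made-up addresses, not the real GlobalFrameAlloc):

use std::cell::Cell;

trait FrameAllocator {
    fn alloc(&self) -> Option<usize>; // Some(frame paddr), or None when exhausted
}

struct Bump { next: Cell<usize>, end: usize }

impl FrameAllocator for Bump {
    fn alloc(&self) -> Option<usize> {
        let frame = self.next.get();
        if frame >= self.end {
            return None;
        }
        self.next.set(frame + 0x1000); // 4 KiB frames
        Some(frame)
    }
}

fn main() {
    let a = Bump { next: Cell::new(0x8000_0000), end: 0x8000_2000 };
    assert_eq!(a.alloc(), Some(0x8000_0000));
    assert_eq!(a.alloc(), Some(0x8000_1000));
    // A third call returns None; ByFrame::map would panic here via expect().
    assert_eq!(a.alloc(), None);
}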

@@ -2,7 +2,6 @@ use super::*;
 #[derive(Debug, Clone)]
 pub struct Delay<T: FrameAllocator> {
-    flags: MemoryAttr,
     allocator: T,
 }
@@ -11,16 +10,17 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
         Box::new(self.clone())
     }
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let entry = pt.map(addr, 0);
-        self.flags.apply(entry);
         entry.set_present(false);
-        entry.update();
+        attr.apply(entry);
     }
-    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = self.allocator.alloc().expect("failed to alloc frame");
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -45,7 +45,7 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
     }
 impl<T: FrameAllocator> Delay<T> {
-    pub fn new(flags: MemoryAttr, allocator: T) -> Self {
-        Delay { flags, allocator }
+    pub fn new(allocator: T) -> Self {
+        Delay { allocator }
     }
 }
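Delay is demand paging: map() installs a non-present entry, and the frame is only allocated when the page is first touched. The page_fault_handler side is not part of this hunk; here is a minimal sketch of that lazy path under assumed semantics (toy Entry type, hypothetical alloc callback):

#[derive(Debug)]
struct Entry { present: bool, target: usize }

fn page_fault_handler(entry: &mut Entry, alloc: impl Fn() -> usize) -> bool {
    if entry.present {
        return false; // fault was not caused by delayed allocation
    }
    entry.target = alloc(); // allocate the frame on first touch
    entry.present = true;   // now make the mapping real
    true
}

fn main() {
    let mut e = Entry { present: false, target: 0 };
    assert!(page_fault_handler(&mut e, || 0x8000_0000));
    assert!(e.present && e.target == 0x8000_0000);
    // Already mapped: a second fault on this page is a real error.
    assert!(!page_fault_handler(&mut e, || unreachable!()));
}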

@@ -3,7 +3,6 @@ use super::*;
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub struct Linear {
     offset: isize,
-    flags: MemoryAttr,
 }
 impl MemoryHandler for Linear {
@@ -11,9 +10,11 @@ impl MemoryHandler for Linear {
         Box::new(self.clone())
     }
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = (addr as isize + self.offset) as PhysAddr;
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -26,7 +27,7 @@ impl MemoryHandler for Linear {
     }
 impl Linear {
-    pub fn new(offset: isize, flags: MemoryAttr) -> Self {
-        Linear { offset, flags }
+    pub fn new(offset: isize) -> Self {
+        Linear { offset }
     }
 }
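Linear maps a virtual page to physical memory by adding a fixed signed offset, as in map() above. A tiny runnable example of the same arithmetic with made-up base addresses (the real values come from KERNEL_OFFSET and MEMORY_OFFSET in the kernel code further down):

fn main() {
    // Assumed example values, in the spirit of the riscv remap code below:
    let kernel_offset: usize = 0xC000_0000; // virtual base (hypothetical)
    let memory_offset: usize = 0x8000_0000; // physical base (hypothetical)
    let offset = -(kernel_offset as isize - memory_offset as isize);

    let virt: usize = 0xC010_2000;
    // Same arithmetic as Linear::map:
    let phys = (virt as isize + offset) as usize;
    assert_eq!(phys, 0x8010_2000);
    println!("{:#x} -> {:#x}", virt, phys);
}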

@@ -4,17 +4,22 @@ use super::*;
 pub trait MemoryHandler: Debug + 'static {
     fn box_clone(&self) -> Box<MemoryHandler>;
-    /// Map addr in the page table
+    /// Map `addr` in the page table
     /// Should set page flags here instead of in page_fault_handler
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr);
-    /// Map addr in the page table eagerly (i.e. no delay allocation)
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr);
+    /// Map `addr` in the page table eagerly (i.e. no delay allocation)
     /// Should set page flags here instead of in page_fault_handler
-    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         // override this when pages are allocated lazily
-        self.map(pt, addr);
+        self.map(pt, addr, attr);
     }
+    /// Unmap `addr` in the page table
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
+    /// Handle page fault on `addr`
+    /// Return true if success, false if error
     fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
 }
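The shape of the whole refactor in one self-contained sketch (toy types, not the real rCore API): the handler no longer owns a MemoryAttr, the caller passes attr into every map call, and apply() leaves the present bit to the handler:

#[derive(Default, Clone, Debug)]
struct MemoryAttr { user: bool, readonly: bool, execute: bool }

#[derive(Default, Debug)]
struct Entry { present: bool, user: bool, writable: bool, execute: bool }

impl Entry {
    fn set_present(&mut self, v: bool) { self.present = v; }
}

impl MemoryAttr {
    // Mirrors the new apply(): flags only, presence is the caller's job.
    fn apply(&self, entry: &mut Entry) {
        entry.user = self.user;
        entry.writable = !self.readonly;
        entry.execute = self.execute;
    }
}

trait MemoryHandler {
    fn map(&self, entry: &mut Entry, attr: &MemoryAttr);
}

struct ByFrame; // eager: frame exists immediately
struct Delay;   // lazy: entry created non-present, filled on page fault

impl MemoryHandler for ByFrame {
    fn map(&self, entry: &mut Entry, attr: &MemoryAttr) {
        entry.set_present(true); // explicit now that apply() doesn't do it
        attr.apply(entry);
    }
}

impl MemoryHandler for Delay {
    fn map(&self, entry: &mut Entry, attr: &MemoryAttr) {
        entry.set_present(false); // fault on first access, then allocate
        attr.apply(entry);
    }
}

fn main() {
    let attr = MemoryAttr { user: true, readonly: true, execute: false };
    let mut e = Entry::default();
    ByFrame.map(&mut e, &attr);
    println!("eager: {:?}", e); // present, user, not writable
    let mut e = Entry::default();
    Delay.map(&mut e, &attr);
    println!("lazy:  {:?}", e); // not present yet
}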

@@ -18,6 +18,7 @@ pub mod handler;
 pub struct MemoryArea {
     start_addr: VirtAddr,
     end_addr: VirtAddr,
+    attr: MemoryAttr,
     handler: Box<MemoryHandler>,
     name: &'static str,
 }
@@ -55,13 +56,12 @@ impl MemoryArea {
     }
     /// Check the array is within the readable memory
     pub fn check_array<S>(&self, ptr: *const S, count: usize) -> bool {
-        // FIXME: check readable
         ptr as usize >= self.start_addr &&
             unsafe { ptr.offset(count as isize) as usize } <= self.end_addr
     }
     /// Check the array is within the writable memory
     pub fn check_mut_array<S>(&self, ptr: *mut S, count: usize) -> bool {
-        // FIXME: check writable
+        !self.attr.readonly &&
         ptr as usize >= self.start_addr &&
             unsafe { ptr.offset(count as isize) as usize } <= self.end_addr
     }
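This is the "fix check writable memory" part of the commit: a write into a readonly area used to pass the check. A toy reconstruction of the fixed predicate (wrapping_add stands in for the original ptr.offset so the sketch needs no unsafe):

struct MemoryArea { start_addr: usize, end_addr: usize, readonly: bool }

impl MemoryArea {
    fn check_mut_array<S>(&self, ptr: *mut S, count: usize) -> bool {
        !self.readonly
            && ptr as usize >= self.start_addr
            && ptr.wrapping_add(count) as usize <= self.end_addr
    }
}

fn main() {
    let rodata = MemoryArea { start_addr: 0x1000, end_addr: 0x2000, readonly: true };
    // In range, but the area is readonly: the old code accepted this write.
    assert!(!rodata.check_mut_array(0x1800 as *mut u8, 16));
}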
@@ -94,7 +94,7 @@ impl MemoryArea {
     */
     fn map(&self, pt: &mut PageTable) {
         for page in Page::range_of(self.start_addr, self.end_addr) {
-            self.handler.map(pt, page.start_address());
+            self.handler.map(pt, page.start_address(), &self.attr);
         }
     }
     /*
@@ -104,7 +104,7 @@ impl MemoryArea {
     */
     fn map_eager(&self, pt: &mut PageTable) {
         for page in Page::range_of(self.start_addr, self.end_addr) {
-            self.handler.map_eager(pt, page.start_address());
+            self.handler.map_eager(pt, page.start_address(), &self.attr);
         }
     }
     /*
@@ -169,14 +169,9 @@ impl MemoryAttr {
         self.mmio = value;
         self
     }
-    /*
-    **  @brief  apply the memory attribute to a page table entry
-    **  @param  entry: &mut impl Entry
-    **          the page table entry to apply the attribute
-    **  @retval none
-    */
+    /// Apply the attributes to page table entry, then update it.
+    /// NOTE: You may need to set present manually.
     pub fn apply(&self, entry: &mut Entry) {
-        entry.set_present(true);
         entry.set_user(self.user);
         entry.set_writable(!self.readonly);
         entry.set_execute(self.execute);
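Since apply() now, per its new doc comment, updates the entry but no longer forces present, callers must settle presence before calling it, which is exactly the order Delay::map uses above. A small sketch of why the order matters (toy types, assumed update semantics):

struct Entry { present: bool, user: bool, updated: bool }

impl Entry {
    fn set_present(&mut self, v: bool) { self.present = v; self.updated = false; }
    fn update(&mut self) { self.updated = true; } // e.g. write-back / TLB flush
}

struct MemoryAttr { user: bool }

impl MemoryAttr {
    fn apply(&self, e: &mut Entry) {
        e.user = self.user;
        e.update(); // apply() updates; it no longer touches `present`
    }
}

fn main() {
    let mut e = Entry { present: true, user: false, updated: false };
    // Delay::map order: decide presence first, then apply + update once.
    e.set_present(false);
    MemoryAttr { user: true }.apply(&mut e);
    assert!(!e.present && e.user && e.updated);
}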
@@ -254,6 +249,7 @@ impl<T: InactivePageTable> MemorySet<T> {
             .find(|&addr| self.test_free_area(addr, addr + len))
             .expect("failed to find free area ???")
     }
+    /// Test if [`start_addr`, `end_addr`) is a free area
     fn test_free_area(&self, start_addr: usize, end_addr: usize) -> bool {
         self.areas.iter()
             .find(|area| area.is_overlap_with(start_addr, end_addr))
@@ -264,10 +260,10 @@ impl<T: InactivePageTable> MemorySet<T> {
     **  @param  area: MemoryArea    the memory area to add
     **  @retval none
     */
-    pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, handler: impl MemoryHandler, name: &'static str) {
+    pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, attr: MemoryAttr, handler: impl MemoryHandler, name: &'static str) {
         assert!(start_addr <= end_addr, "invalid memory area");
         assert!(self.test_free_area(start_addr, end_addr), "memory area overlap");
-        let area = MemoryArea { start_addr, end_addr, handler: Box::new(handler), name };
+        let area = MemoryArea { start_addr, end_addr, attr, handler: Box::new(handler), name };
         self.page_table.edit(|pt| area.map(pt));
         self.areas.push(area);
     }
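Call sites therefore now read push(start, end, attr, handler, name). MemoryAttr is built in a consuming builder style, visible in the kernel code below; a toy reconstruction of that builder (field set assumed from apply() above):

#[derive(Default, Debug, Clone)]
struct MemoryAttr { user: bool, readonly: bool, execute: bool, mmio: u8 }

impl MemoryAttr {
    fn user(mut self) -> Self { self.user = true; self }
    fn readonly(mut self) -> Self { self.readonly = true; self }
    fn execute(mut self) -> Self { self.execute = true; self }
    fn mmio(mut self, value: u8) -> Self { self.mmio = value; self }
}

fn main() {
    // Mirrors the remap call sites below:
    let text = MemoryAttr::default().execute().readonly();
    let stack = MemoryAttr::default().user();
    let io = MemoryAttr::default().mmio(1); // stand-in for MMIOType::* as u8
    println!("text = {:?}\nstack = {:?}\nio = {:?}", text, stack, io);
}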

@@ -103,14 +103,14 @@ static mut KERNEL_MEMORY_SET: Option<MemorySet> = None;
 /// remap kernel page table after all initialization.
 fn remap_the_kernel() {
     let mut ms = MemorySet::new_bare();
-    ms.push(0, bootstacktop as usize, Linear::new(0, MemoryAttr::default()), "kstack");
-    ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
-    ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
-    ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
-    ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
+    ms.push(0, bootstacktop as usize, MemoryAttr::default(), Linear::new(0), "kstack");
+    ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(0), "text");
+    ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(0), "data");
+    ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(0), "rodata");
+    ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(0), "bss");
     use super::board::{IO_REMAP_BASE, IO_REMAP_END};
-    ms.push(IO_REMAP_BASE, IO_REMAP_END, Linear::new(0, MemoryAttr::default().mmio(MMIOType::Device as u8)), "io_remap");
+    ms.push(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(MMIOType::Device as u8), Linear::new(0), "io_remap");
     unsafe { ms.get_page_table_mut().activate_as_kernel() }
     unsafe { KERNEL_MEMORY_SET = Some(ms) }
@@ -119,7 +119,7 @@ fn remap_the_kernel() {
 pub fn ioremap(start: usize, len: usize, name: &'static str) -> usize {
     if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
-        ms.push(start, start + len, Linear::new(0, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8)), name);
+        ms.push(start, start + len, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8), Linear::new(0), name);
         return start;
     }
     0
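Both mappings here are MMIO: Device for the io_remap window, NormalNonCacheable for ioremap'd regions. Uncacheable attributes are only half the story; drivers must also use volatile accesses so the compiler cannot elide or reorder register reads and writes. A tiny sketch with ordinary memory standing in for a device page:

fn main() {
    let mut fake_reg: u32 = 0; // a real driver would use the address ioremap returned
    let reg = &mut fake_reg as *mut u32;
    unsafe {
        reg.write_volatile(1); // not elided or merged by the optimizer
        assert_eq!(reg.read_volatile(), 1);
    }
}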

@@ -74,12 +74,12 @@ fn init_frame_allocator() {
 fn remap_the_kernel(dtb: usize) {
     let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
     let mut ms = MemorySet::new_bare();
-    ms.push(stext as usize, etext as usize, Linear::new(offset, MemoryAttr::default().execute().readonly()), "text");
-    ms.push(sdata as usize, edata as usize, Linear::new(offset, MemoryAttr::default()), "data");
-    ms.push(srodata as usize, erodata as usize, Linear::new(offset, MemoryAttr::default().readonly()), "rodata");
-    ms.push(bootstack as usize, bootstacktop as usize, Linear::new(offset, MemoryAttr::default()), "stack");
-    ms.push(sbss as usize, ebss as usize, Linear::new(offset, MemoryAttr::default()), "bss");
-    ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, Linear::new(offset, MemoryAttr::default()), "dts");
+    ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(offset), "text");
+    ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(offset), "data");
+    ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(offset), "rodata");
+    ms.push(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), Linear::new(offset), "stack");
+    ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(offset), "bss");
+    ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, MemoryAttr::default().readonly(), Linear::new(offset), "dts");
     unsafe { ms.activate(); }
     unsafe { SATP = ms.token(); }
     mem::forget(ms);
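Note the dts area is now mapped readonly, and the function still ends with mem::forget(ms): the kernel's own MemorySet must never be dropped, since its destructor would tear down the very mappings the kernel is executing from. A toy illustration of what forget suppresses:

use std::mem;

struct MemorySet;

impl Drop for MemorySet {
    fn drop(&mut self) {
        println!("unmapping everything (fatal for the kernel's own set)");
    }
}

fn main() {
    let ms = MemorySet;
    mem::forget(ms); // leak on purpose: Drop never runs, the mapping stays live
}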

@@ -48,7 +48,7 @@ impl TrapFrame {
         tf.rflags = 0x282;
         tf
     }
-    fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool, tls: usize) -> Self {
+    fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool) -> Self {
         use crate::arch::gdt;
         let mut tf = TrapFrame::default();
         tf.cs = if is32 { gdt::UCODE32_SELECTOR.0 } else { gdt::UCODE_SELECTOR.0 } as usize;
@@ -56,7 +56,6 @@ impl TrapFrame {
         tf.ss = if is32 { gdt::UDATA32_SELECTOR.0 } else { gdt::UDATA_SELECTOR.0 } as usize;
         tf.rsp = rsp;
         tf.rflags = 0x282;
-        tf.fsbase = tls;
         tf
     }
     pub fn is_user(&self) -> bool {
@@ -167,11 +166,11 @@ impl Context {
             tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
         }.push_at(kstack_top)
     }
-    pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize, tls: usize) -> Self {
+    pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize) -> Self {
         InitStack {
             context: ContextData::new(cr3),
             trapret: trap_ret as usize,
-            tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32, tls),
+            tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32),
         }.push_at(kstack_top)
     }
     pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, cr3: usize) -> Self {
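On x86_64, user-level TLS is reached through the FS segment base, so dropping tf.fsbase = tls means new user threads no longer start with TLS pre-wired by the kernel; user space has to arrange it afterwards. A miniature of the removed behavior (toy TrapFrame, hypothetical addresses; not rCore's actual follow-up mechanism):

#[derive(Default, Debug)]
struct TrapFrame { rsp: usize, fsbase: usize }

fn new_user_thread_before(rsp: usize, tls: usize) -> TrapFrame {
    TrapFrame { rsp, fsbase: tls } // pre-commit: FS base seeded from the ELF TLS segment
}

fn new_user_thread_after(rsp: usize) -> TrapFrame {
    TrapFrame { rsp, fsbase: 0 }   // post-commit: user code sets TLS up later
}

fn main() {
    println!("{:?}", new_user_thread_before(0x7fff_0000, 0x7000_0000));
    println!("{:?}", new_user_thread_after(0x7fff_0000));
}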

@@ -118,7 +118,7 @@ impl Thread {
         }
         // Make page table
-        let (mut memory_set, entry_addr, tls) = memory_set_from(&elf);
+        let (mut memory_set, entry_addr) = memory_set_from(&elf);
         // User stack
         use crate::consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
@@ -128,7 +128,7 @@ impl Thread {
             true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
             false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
         };
-        memory_set.push(ustack_buttom, ustack_top, ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
+        memory_set.push(ustack_buttom, ustack_top, MemoryAttr::default().user(), ByFrame::new(GlobalFrameAlloc), "user_stack");
         ustack_top
     };
     #[cfg(feature = "no_mmu")]
@@ -147,7 +147,7 @@ impl Thread {
         // otherwise, check if elf is loaded from the beginning, then phdr can be inferred.
         map.insert(abi::AT_PHDR, elf_addr.virtual_addr() as usize + elf.header.pt2.ph_offset() as usize);
     } else {
-        debug!("new_user: no phdr found, tls might not work");
+        warn!("new_user: no phdr found, tls might not work");
     }
     map.insert(abi::AT_PHENT, elf.header.pt2.ph_entry_size() as usize);
     map.insert(abi::AT_PHNUM, elf.header.pt2.ph_count() as usize);
@@ -170,7 +170,7 @@ impl Thread {
         Box::new(Thread {
             context: unsafe {
                 Context::new_user_thread(
-                    entry_addr, ustack_top, kstack.top(), is32, memory_set.token(), tls)
+                    entry_addr, ustack_top, kstack.top(), is32, memory_set.token())
             },
             kstack,
             proc: Arc::new(Mutex::new(Process {
@@ -232,12 +232,11 @@ impl Process {
 /// Generate a MemorySet according to the ELF file.
-/// Also return the real entry point address and tls top addr.
-fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
+/// Also return the real entry point address.
+fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
     debug!("come in to memory_set_from");
     let mut ms = MemorySet::new();
     let mut entry = elf.header.pt2.entry_point() as usize;
-    let mut tls = 0;
     // [NoMMU] Get total memory size and alloc space
     let va_begin = elf.program_iter()
@@ -255,21 +254,13 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
     { entry += 0x40000000; }
     for ph in elf.program_iter() {
-        if ph.get_type() != Ok(Type::Load) && ph.get_type() != Ok(Type::Tls) {
+        if ph.get_type() != Ok(Type::Load) {
             continue;
         }
-        let mut virt_addr = ph.virtual_addr() as usize;
+        let virt_addr = ph.virtual_addr() as usize;
         let offset = ph.offset() as usize;
         let file_size = ph.file_size() as usize;
         let mem_size = ph.mem_size() as usize;
-        let mut name = "load";
-        if ph.get_type() == Ok(Type::Tls) {
-            virt_addr = USER_TLS_OFFSET;
-            name = "tls";
-            debug!("copying tls addr to {:X}", virt_addr);
-        }
         #[cfg(target_arch = "aarch64")]
         assert_eq!((virt_addr >> 48), 0xffff, "Segment Fault");
@@ -281,7 +272,7 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
         info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
         #[cfg(not(feature = "no_mmu"))]
         let target = {
-            ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), &name);
+            ms.push(virt_addr, virt_addr + mem_size, ph.flags().to_attr(), ByFrame::new(GlobalFrameAlloc), "");
             unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
         };
         // Copy data
@@ -293,40 +284,19 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
                 target[file_size..].iter_mut().for_each(|x| *x = 0);
             });
         }
-        if ph.get_type() == Ok(Type::Tls) {
-            virt_addr = USER_TMP_TLS_OFFSET;
-            tls = virt_addr + ph.mem_size() as usize;
-            debug!("copying tls addr to {:X}", virt_addr);
-            // TODO: put this in a function
-            // Get target slice
-            #[cfg(feature = "no_mmu")]
-            let target = &mut target[virt_addr - va_begin..virt_addr - va_begin + mem_size];
-            #[cfg(feature = "no_mmu")]
-            info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
-            #[cfg(not(feature = "no_mmu"))]
-            let target = {
-                ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()).writable(), GlobalFrameAlloc), "tmptls");
-                unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
-            };
-            // Copy data
-            unsafe {
-                ms.with(|| {
-                    if file_size != 0 {
-                        target[..file_size].copy_from_slice(&elf.input[offset..offset + file_size]);
-                    }
-                    target[file_size..].iter_mut().for_each(|x| *x = 0);
-                });
-            }
-        }
     }
-    (ms, entry, tls)
+    (ms, entry)
 }
 
-fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
-    let mut flags = MemoryAttr::default().user();
-    // TODO: handle readonly
-    if elf_flags.is_execute() { flags = flags.execute(); }
-    flags
-}
+trait ToMemoryAttr {
+    fn to_attr(&self) -> MemoryAttr;
+}
+
+impl ToMemoryAttr for Flags {
+    fn to_attr(&self) -> MemoryAttr {
+        let mut flags = MemoryAttr::default().user();
+        // FIXME: handle readonly
+        if self.is_execute() { flags = flags.execute(); }
+        flags
+    }
+}
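The free function memory_attr_from becomes a ToMemoryAttr extension trait on the ELF Flags type: same logic, but call sites read ph.flags().to_attr(). A self-contained rendition of the pattern with a stand-in Flags newtype (PF_X is bit 0 in real ELF segment flags):

#[derive(Clone, Copy)]
struct Flags(u32);

impl Flags {
    fn is_execute(&self) -> bool { self.0 & 0x1 != 0 } // PF_X bit
}

#[derive(Default, Debug)]
struct MemoryAttr { user: bool, execute: bool }

trait ToMemoryAttr {
    fn to_attr(&self) -> MemoryAttr;
}

impl ToMemoryAttr for Flags {
    fn to_attr(&self) -> MemoryAttr {
        MemoryAttr { user: true, execute: self.is_execute() }
    }
}

fn main() {
    println!("{:?}", Flags(0x5).to_attr()); // R+X segment -> executable attr
}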

@@ -26,8 +26,7 @@ pub fn sys_mmap(mut addr: usize, len: usize, prot: usize, flags: usize, fd: i32,
     if flags.contains(MmapFlags::SHARED) {
         return Err(SysError::EINVAL);
     }
-    let handler = Delay::new(prot_to_attr(prot), GlobalFrameAlloc);
-    proc.memory_set.push(addr, addr + len, handler, "mmap");
+    proc.memory_set.push(addr, addr + len, prot.to_attr(), Delay::new(GlobalFrameAlloc), "mmap");
     return Ok(addr as isize);
     }
     unimplemented!()
@@ -38,19 +37,14 @@ pub fn sys_mprotect(addr: usize, len: usize, prot: usize) -> SysResult {
     info!("mprotect: addr={:#x}, size={:#x}, prot={:?}", addr, len, prot);
     let mut proc = process();
-    let attr = prot_to_attr(prot);
+    let attr = prot.to_attr();
     let memory_area = proc.memory_set.iter().find(|area| area.contains(addr));
     if memory_area.is_some() {
         proc.memory_set.edit(|pt| {
             for page in Page::range_of(addr, addr + len) {
                 let entry = pt.get_entry(page.start_address()).expect("failed to get entry");
-                // keep original presence
-                let orig_present = entry.present();
                 attr.apply(entry);
-                entry.set_present(orig_present);
-                entry.update();
             }
         });
         Ok(0)
@@ -90,9 +84,11 @@ bitflags! {
     }
 }
 
-fn prot_to_attr(prot: MmapProt) -> MemoryAttr {
-    let mut attr = MemoryAttr::default().user();
-    if prot.contains(MmapProt::EXEC) { attr = attr.execute(); }
-    if !prot.contains(MmapProt::WRITE) { attr = attr.readonly(); }
-    attr
-}
+impl MmapProt {
+    fn to_attr(self) -> MemoryAttr {
+        let mut attr = MemoryAttr::default().user();
+        if self.contains(MmapProt::EXEC) { attr = attr.execute(); }
+        if !self.contains(MmapProt::WRITE) { attr = attr.readonly(); }
+        attr
+    }
+}
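MmapProt::to_attr folds the mmap protection bits into a MemoryAttr: absence of WRITE means readonly, EXEC sets execute, and read permission is implicit. A runnable sketch using the raw Linux ABI values (PROT_READ = 0x1, PROT_WRITE = 0x2, PROT_EXEC = 0x4) instead of the bitflags type:

const PROT_WRITE: usize = 0x2;
const PROT_EXEC: usize = 0x4;

#[derive(Debug)]
struct MemoryAttr { user: bool, readonly: bool, execute: bool }

fn to_attr(prot: usize) -> MemoryAttr {
    MemoryAttr {
        user: true,
        readonly: prot & PROT_WRITE == 0,
        execute: prot & PROT_EXEC != 0,
    }
}

fn main() {
    println!("{:?}", to_attr(0x1));              // PROT_READ only -> readonly
    println!("{:?}", to_attr(0x1 | PROT_WRITE)); // read + write -> writable
}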