From f7f740a021a44e0ce00ee8e41f5f112d5bc50ff1 Mon Sep 17 00:00:00 2001
From: WangRunji
Date: Fri, 8 Mar 2019 10:10:13 +0800
Subject: [PATCH] remove TLS setup code. fix check writable memory

---
 .../memory/src/memory_set/handler/byframe.rs  | 11 +--
 crate/memory/src/memory_set/handler/delay.rs  | 16 ++---
 crate/memory/src/memory_set/handler/linear.rs | 11 +--
 crate/memory/src/memory_set/handler/mod.rs    | 15 ++--
 crate/memory/src/memory_set/mod.rs            | 24 +++----
 kernel/src/arch/aarch64/memory.rs             | 14 ++--
 kernel/src/arch/riscv32/memory.rs             | 12 ++--
 kernel/src/arch/x86_64/interrupt/trapframe.rs |  7 +-
 kernel/src/process/structs.rs                 | 72 ++++++-------------
 kernel/src/syscall/mem.rs                     | 24 +++----
 10 files changed, 87 insertions(+), 119 deletions(-)

diff --git a/crate/memory/src/memory_set/handler/byframe.rs b/crate/memory/src/memory_set/handler/byframe.rs
index ddfdd9d..93ac15a 100644
--- a/crate/memory/src/memory_set/handler/byframe.rs
+++ b/crate/memory/src/memory_set/handler/byframe.rs
@@ -2,7 +2,6 @@ use super::*;
 
 #[derive(Debug, Clone)]
 pub struct ByFrame<T: FrameAllocator> {
-    flags: MemoryAttr,
     allocator: T,
 }
 
@@ -11,9 +10,11 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
         Box::new(self.clone())
     }
 
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = self.allocator.alloc().expect("failed to allocate frame");
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
 
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -28,7 +29,7 @@ impl<T: FrameAllocator> MemoryHandler for ByFrame<T> {
 }
 
 impl<T: FrameAllocator> ByFrame<T> {
-    pub fn new(flags: MemoryAttr, allocator: T) -> Self {
-        ByFrame { flags, allocator }
+    pub fn new(allocator: T) -> Self {
+        ByFrame { allocator }
     }
 }
\ No newline at end of file
diff --git a/crate/memory/src/memory_set/handler/delay.rs b/crate/memory/src/memory_set/handler/delay.rs
index a908834..7ba92d8 100644
--- a/crate/memory/src/memory_set/handler/delay.rs
+++ b/crate/memory/src/memory_set/handler/delay.rs
@@ -2,7 +2,6 @@ use super::*;
 
 #[derive(Debug, Clone)]
 pub struct Delay<T: FrameAllocator> {
-    flags: MemoryAttr,
     allocator: T,
 }
 
@@ -11,16 +10,17 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
         Box::new(self.clone())
     }
 
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let entry = pt.map(addr, 0);
-        self.flags.apply(entry);
         entry.set_present(false);
-        entry.update();
+        attr.apply(entry);
     }
 
-    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = self.allocator.alloc().expect("failed to alloc frame");
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
 
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -45,7 +45,7 @@ impl<T: FrameAllocator> MemoryHandler for Delay<T> {
 }
 
 impl<T: FrameAllocator> Delay<T> {
-    pub fn new(flags: MemoryAttr, allocator: T) -> Self {
-        Delay { flags, allocator }
+    pub fn new(allocator: T) -> Self {
+        Delay { allocator }
     }
 }
diff --git a/crate/memory/src/memory_set/handler/linear.rs b/crate/memory/src/memory_set/handler/linear.rs
index 0ea8482..900355b 100644
--- a/crate/memory/src/memory_set/handler/linear.rs
+++ b/crate/memory/src/memory_set/handler/linear.rs
@@ -3,7 +3,6 @@ use super::*;
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub struct Linear {
     offset: isize,
-    flags: MemoryAttr,
 }
 
 impl MemoryHandler for Linear {
@@ -11,9 +10,11 @@ impl MemoryHandler for Linear {
         Box::new(self.clone())
     }
 
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         let target = (addr as isize + self.offset) as PhysAddr;
-        self.flags.apply(pt.map(addr, target));
+        let entry = pt.map(addr, target);
+        entry.set_present(true);
+        attr.apply(entry);
     }
 
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr) {
@@ -26,7 +27,7 @@ impl MemoryHandler for Linear {
 }
 
 impl Linear {
-    pub fn new(offset: isize, flags: MemoryAttr) -> Self {
-        Linear { offset, flags }
+    pub fn new(offset: isize) -> Self {
+        Linear { offset }
     }
 }
\ No newline at end of file
diff --git a/crate/memory/src/memory_set/handler/mod.rs b/crate/memory/src/memory_set/handler/mod.rs
index 81922db..8f92fe3 100644
--- a/crate/memory/src/memory_set/handler/mod.rs
+++ b/crate/memory/src/memory_set/handler/mod.rs
@@ -4,17 +4,22 @@ use super::*;
 pub trait MemoryHandler: Debug + 'static {
     fn box_clone(&self) -> Box<MemoryHandler>;
 
-    /// Map addr in the page table
+    /// Map `addr` in the page table
     /// Should set page flags here instead of in page_fault_handler
-    fn map(&self, pt: &mut PageTable, addr: VirtAddr);
+    fn map(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr);
 
-    /// Map addr in the page table eagerly (i.e. no delay allocation)
+    /// Map `addr` in the page table eagerly (i.e. no delay allocation)
     /// Should set page flags here instead of in page_fault_handler
-    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr) {
+    fn map_eager(&self, pt: &mut PageTable, addr: VirtAddr, attr: &MemoryAttr) {
         // override this when pages are allocated lazily
-        self.map(pt, addr);
+        self.map(pt, addr, attr);
     }
+
+    /// Unmap `addr` in the page table
     fn unmap(&self, pt: &mut PageTable, addr: VirtAddr);
+
+    /// Handle page fault on `addr`
+    /// Return true on success, false on error
     fn page_fault_handler(&self, pt: &mut PageTable, addr: VirtAddr) -> bool;
 }
diff --git a/crate/memory/src/memory_set/mod.rs b/crate/memory/src/memory_set/mod.rs
index b4611fa..e141f94 100644
--- a/crate/memory/src/memory_set/mod.rs
+++ b/crate/memory/src/memory_set/mod.rs
@@ -18,6 +18,7 @@ pub mod handler;
 pub struct MemoryArea {
     start_addr: VirtAddr,
     end_addr: VirtAddr,
+    attr: MemoryAttr,
     handler: Box<MemoryHandler>,
     name: &'static str,
 }
@@ -55,14 +56,13 @@ impl MemoryArea {
     }
     /// Check the array is within the readable memory
    pub fn check_array<S>(&self, ptr: *const S, count: usize) -> bool {
-        // FIXME: check readable
         ptr as usize >= self.start_addr &&
             unsafe { ptr.offset(count as isize) as usize } <= self.end_addr
     }
     /// Check the array is within the writable memory
     pub fn check_mut_array<S>(&self, ptr: *mut S, count: usize) -> bool {
-        // FIXME: check writable
-        ptr as usize >= self.start_addr &&
+        !self.attr.readonly &&
+            ptr as usize >= self.start_addr &&
             unsafe { ptr.offset(count as isize) as usize } <= self.end_addr
     }
     /// Check the null-end C string is within the readable memory, and is valid.
@@ -94,7 +94,7 @@ impl MemoryArea {
     */
     fn map(&self, pt: &mut PageTable) {
         for page in Page::range_of(self.start_addr, self.end_addr) {
-            self.handler.map(pt, page.start_address());
+            self.handler.map(pt, page.start_address(), &self.attr);
         }
     }
     /*
@@ -104,7 +104,7 @@ impl MemoryArea {
     */
     fn map_eager(&self, pt: &mut PageTable) {
         for page in Page::range_of(self.start_addr, self.end_addr) {
-            self.handler.map_eager(pt, page.start_address());
+            self.handler.map_eager(pt, page.start_address(), &self.attr);
         }
     }
     /*
@@ -169,14 +169,9 @@ impl MemoryAttr {
         self.mmio = value;
         self
     }
-    /*
-    **  @brief  apply the memory attribute to a page table entry
-    **  @param  entry: &mut impl Entry
-    **          the page table entry to apply the attribute
-    **  @retval none
-    */
+    /// Apply the attributes to the page table entry, then update it.
+    /// NOTE: You may need to set present manually.
     pub fn apply(&self, entry: &mut Entry) {
-        entry.set_present(true);
         entry.set_user(self.user);
         entry.set_writable(!self.readonly);
         entry.set_execute(self.execute);
@@ -254,6 +249,7 @@ impl MemorySet {
             .find(|&addr| self.test_free_area(addr, addr + len))
             .expect("failed to find free area ???")
     }
+    /// Test if [`start_addr`, `end_addr`) is a free area
     fn test_free_area(&self, start_addr: usize, end_addr: usize) -> bool {
         self.areas.iter()
             .find(|area| area.is_overlap_with(start_addr, end_addr))
@@ -264,10 +260,10 @@ impl MemorySet {
     **  @param  area: MemoryArea        the memory area to add
     **  @retval none
     */
-    pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, handler: impl MemoryHandler, name: &'static str) {
+    pub fn push(&mut self, start_addr: VirtAddr, end_addr: VirtAddr, attr: MemoryAttr, handler: impl MemoryHandler, name: &'static str) {
         assert!(start_addr <= end_addr, "invalid memory area");
         assert!(self.test_free_area(start_addr, end_addr), "memory area overlap");
-        let area = MemoryArea { start_addr, end_addr, handler: Box::new(handler), name };
+        let area = MemoryArea { start_addr, end_addr, attr, handler: Box::new(handler), name };
         self.page_table.edit(|pt| area.map(pt));
         self.areas.push(area);
     }
diff --git a/kernel/src/arch/aarch64/memory.rs b/kernel/src/arch/aarch64/memory.rs
index f4b7cc0..a0a2723 100644
--- a/kernel/src/arch/aarch64/memory.rs
+++ b/kernel/src/arch/aarch64/memory.rs
@@ -103,14 +103,14 @@ static mut KERNEL_MEMORY_SET: Option<MemorySet> = None;
 /// remap kernel page table after all initialization.
 fn remap_the_kernel() {
     let mut ms = MemorySet::new_bare();
-    ms.push(0, bootstacktop as usize, Linear::new(0, MemoryAttr::default()), "kstack");
-    ms.push(stext as usize, etext as usize, Linear::new(0, MemoryAttr::default().execute().readonly()), "text");
-    ms.push(sdata as usize, edata as usize, Linear::new(0, MemoryAttr::default()), "data");
-    ms.push(srodata as usize, erodata as usize, Linear::new(0, MemoryAttr::default().readonly()), "rodata");
-    ms.push(sbss as usize, ebss as usize, Linear::new(0, MemoryAttr::default()), "bss");
+    ms.push(0, bootstacktop as usize, MemoryAttr::default(), Linear::new(0), "kstack");
+    ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(0), "text");
+    ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(0), "data");
+    ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(0), "rodata");
+    ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(0), "bss");
 
     use super::board::{IO_REMAP_BASE, IO_REMAP_END};
-    ms.push(IO_REMAP_BASE, IO_REMAP_END, Linear::new(0, MemoryAttr::default().mmio(MMIOType::Device as u8)), "io_remap");
+    ms.push(IO_REMAP_BASE, IO_REMAP_END, MemoryAttr::default().mmio(MMIOType::Device as u8), Linear::new(0), "io_remap");
 
     unsafe { ms.get_page_table_mut().activate_as_kernel() }
     unsafe { KERNEL_MEMORY_SET = Some(ms) }
@@ -119,7 +119,7 @@ pub fn ioremap(start: usize, len: usize, name: &'static str) -> usize {
     if let Some(ms) = unsafe { KERNEL_MEMORY_SET.as_mut() } {
-        ms.push(start, start + len, Linear::new(0, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8)), name);
+        ms.push(start, start + len, MemoryAttr::default().mmio(MMIOType::NormalNonCacheable as u8), Linear::new(0), name);
         return start;
     }
     0
diff --git a/kernel/src/arch/riscv32/memory.rs b/kernel/src/arch/riscv32/memory.rs
index 7355ab8..5266987 100644
--- a/kernel/src/arch/riscv32/memory.rs
+++ b/kernel/src/arch/riscv32/memory.rs
@@ -74,12 +74,12 @@ fn init_frame_allocator() {
 fn remap_the_kernel(dtb: usize) {
     let offset = -(KERNEL_OFFSET as isize - MEMORY_OFFSET as isize);
     let mut ms = MemorySet::new_bare();
-    ms.push(stext as usize, etext as usize, Linear::new(offset, MemoryAttr::default().execute().readonly()), "text");
-    ms.push(sdata as usize, edata as usize, Linear::new(offset, MemoryAttr::default()), "data");
-    ms.push(srodata as usize, erodata as usize, Linear::new(offset, MemoryAttr::default().readonly()), "rodata");
-    ms.push(bootstack as usize, bootstacktop as usize, Linear::new(offset, MemoryAttr::default()), "stack");
-    ms.push(sbss as usize, ebss as usize, Linear::new(offset, MemoryAttr::default()), "bss");
-    ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, Linear::new(offset, MemoryAttr::default()), "dts");
+    ms.push(stext as usize, etext as usize, MemoryAttr::default().execute().readonly(), Linear::new(offset), "text");
+    ms.push(sdata as usize, edata as usize, MemoryAttr::default(), Linear::new(offset), "data");
+    ms.push(srodata as usize, erodata as usize, MemoryAttr::default().readonly(), Linear::new(offset), "rodata");
+    ms.push(bootstack as usize, bootstacktop as usize, MemoryAttr::default(), Linear::new(offset), "stack");
+    ms.push(sbss as usize, ebss as usize, MemoryAttr::default(), Linear::new(offset), "bss");
+    ms.push(dtb, dtb + super::consts::MAX_DTB_SIZE, MemoryAttr::default().readonly(), Linear::new(offset), "dts");
     unsafe { ms.activate(); }
     unsafe { SATP = ms.token(); }
     mem::forget(ms);
diff --git a/kernel/src/arch/x86_64/interrupt/trapframe.rs b/kernel/src/arch/x86_64/interrupt/trapframe.rs
index f4531d9..926085f 100644
--- a/kernel/src/arch/x86_64/interrupt/trapframe.rs
+++ b/kernel/src/arch/x86_64/interrupt/trapframe.rs
@@ -48,7 +48,7 @@ impl TrapFrame {
         tf.rflags = 0x282;
         tf
     }
-    fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool, tls: usize) -> Self {
+    fn new_user_thread(entry_addr: usize, rsp: usize, is32: bool) -> Self {
         use crate::arch::gdt;
         let mut tf = TrapFrame::default();
         tf.cs = if is32 { gdt::UCODE32_SELECTOR.0 } else { gdt::UCODE_SELECTOR.0 } as usize;
@@ -56,7 +56,6 @@ impl TrapFrame {
         tf.ss = if is32 { gdt::UDATA32_SELECTOR.0 } else { gdt::UDATA_SELECTOR.0 } as usize;
         tf.rsp = rsp;
         tf.rflags = 0x282;
-        tf.fsbase = tls;
         tf
     }
     pub fn is_user(&self) -> bool {
@@ -167,11 +166,11 @@ impl Context {
             tf: TrapFrame::new_kernel_thread(entry, arg, kstack_top),
         }.push_at(kstack_top)
     }
-    pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize, tls: usize) -> Self {
+    pub unsafe fn new_user_thread(entry_addr: usize, ustack_top: usize, kstack_top: usize, is32: bool, cr3: usize) -> Self {
         InitStack {
             context: ContextData::new(cr3),
             trapret: trap_ret as usize,
-            tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32, tls),
+            tf: TrapFrame::new_user_thread(entry_addr, ustack_top, is32),
         }.push_at(kstack_top)
     }
     pub unsafe fn new_fork(tf: &TrapFrame, kstack_top: usize, cr3: usize) -> Self {
diff --git a/kernel/src/process/structs.rs b/kernel/src/process/structs.rs
index 59ccc9e..2e40d1d 100644
--- a/kernel/src/process/structs.rs
+++ b/kernel/src/process/structs.rs
@@ -118,7 +118,7 @@ impl Thread {
         }
 
         // Make page table
-        let (mut memory_set, entry_addr, tls) = memory_set_from(&elf);
+        let (mut memory_set, entry_addr) = memory_set_from(&elf);
 
         // User stack
         use crate::consts::{USER_STACK_OFFSET, USER_STACK_SIZE, USER32_STACK_OFFSET};
@@ -128,7 +128,7 @@ impl Thread {
                 true => (USER32_STACK_OFFSET, USER32_STACK_OFFSET + USER_STACK_SIZE),
                 false => (USER_STACK_OFFSET, USER_STACK_OFFSET + USER_STACK_SIZE),
             };
-            memory_set.push(ustack_buttom, ustack_top, ByFrame::new(MemoryAttr::default().user(), GlobalFrameAlloc), "user_stack");
+            memory_set.push(ustack_buttom, ustack_top, MemoryAttr::default().user(), ByFrame::new(GlobalFrameAlloc), "user_stack");
             ustack_top
         };
         #[cfg(feature = "no_mmu")]
@@ -147,7 +147,7 @@ impl Thread {
             // otherwise, check if elf is loaded from the beginning, then phdr can be inferred.
             map.insert(abi::AT_PHDR, elf_addr.virtual_addr() as usize + elf.header.pt2.ph_offset() as usize);
         } else {
-            debug!("new_user: no phdr found, tls might not work");
+            warn!("new_user: no phdr found, tls might not work");
         }
         map.insert(abi::AT_PHENT, elf.header.pt2.ph_entry_size() as usize);
         map.insert(abi::AT_PHNUM, elf.header.pt2.ph_count() as usize);
@@ -170,7 +170,7 @@ impl Thread {
         Box::new(Thread {
             context: unsafe {
                 Context::new_user_thread(
-                    entry_addr, ustack_top, kstack.top(), is32, memory_set.token(), tls)
+                    entry_addr, ustack_top, kstack.top(), is32, memory_set.token())
             },
             kstack,
             proc: Arc::new(Mutex::new(Process {
@@ -232,12 +232,11 @@ impl Process {
 }
 
 /// Generate a MemorySet according to the ELF file.
-/// Also return the real entry point address and tls top addr.
-fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
+/// Also return the real entry point address.
+fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize) {
     debug!("come in to memory_set_from");
     let mut ms = MemorySet::new();
     let mut entry = elf.header.pt2.entry_point() as usize;
-    let mut tls = 0;
 
     // [NoMMU] Get total memory size and alloc space
     let va_begin = elf.program_iter()
@@ -255,21 +254,13 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
     { entry += 0x40000000; }
 
     for ph in elf.program_iter() {
-        if ph.get_type() != Ok(Type::Load) && ph.get_type() != Ok(Type::Tls) {
+        if ph.get_type() != Ok(Type::Load) {
             continue;
         }
-
-        let mut virt_addr = ph.virtual_addr() as usize;
+        let virt_addr = ph.virtual_addr() as usize;
         let offset = ph.offset() as usize;
         let file_size = ph.file_size() as usize;
         let mem_size = ph.mem_size() as usize;
-        let mut name = "load";
-
-        if ph.get_type() == Ok(Type::Tls) {
-            virt_addr = USER_TLS_OFFSET;
-            name = "tls";
-            debug!("copying tls addr to {:X}", virt_addr);
-        }
 
         #[cfg(target_arch = "aarch64")]
         assert_eq!((virt_addr >> 48), 0xffff, "Segment Fault");
@@ -281,7 +272,7 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
         info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
         #[cfg(not(feature = "no_mmu"))]
         let target = {
-            ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()), GlobalFrameAlloc), &name);
+            ms.push(virt_addr, virt_addr + mem_size, ph.flags().to_attr(), ByFrame::new(GlobalFrameAlloc), "");
             unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
         };
         // Copy data
@@ -293,40 +284,19 @@ fn memory_set_from(elf: &ElfFile<'_>) -> (MemorySet, usize, usize) {
                 target[file_size..].iter_mut().for_each(|x| *x = 0);
             });
         }
-
-        if ph.get_type() == Ok(Type::Tls) {
-            virt_addr = USER_TMP_TLS_OFFSET;
-            tls = virt_addr + ph.mem_size() as usize;
-            debug!("copying tls addr to {:X}", virt_addr);
-
-            // TODO: put this in a function
-            // Get target slice
-            #[cfg(feature = "no_mmu")]
-            let target = &mut target[virt_addr - va_begin..virt_addr - va_begin + mem_size];
-            #[cfg(feature = "no_mmu")]
-            info!("area @ {:?}, size = {:#x}", target.as_ptr(), mem_size);
-            #[cfg(not(feature = "no_mmu"))]
-            let target = {
-                ms.push(virt_addr, virt_addr + mem_size, ByFrame::new(memory_attr_from(ph.flags()).writable(), GlobalFrameAlloc), "tmptls");
-                unsafe { ::core::slice::from_raw_parts_mut(virt_addr as *mut u8, mem_size) }
-            };
-            // Copy data
-            unsafe {
-                ms.with(|| {
-                    if file_size != 0 {
-                        target[..file_size].copy_from_slice(&elf.input[offset..offset + file_size]);
-                    }
-                    target[file_size..].iter_mut().for_each(|x| *x = 0);
-                });
-            }
-        }
     }
-    (ms, entry, tls)
+    (ms, entry)
 }
 
-fn memory_attr_from(elf_flags: Flags) -> MemoryAttr {
-    let mut flags = MemoryAttr::default().user();
-    // TODO: handle readonly
-    if elf_flags.is_execute() { flags = flags.execute(); }
-    flags
+trait ToMemoryAttr {
+    fn to_attr(&self) -> MemoryAttr;
+}
+
+impl ToMemoryAttr for Flags {
+    fn to_attr(&self) -> MemoryAttr {
+        let mut flags = MemoryAttr::default().user();
+        // FIXME: handle readonly
+        if self.is_execute() { flags = flags.execute(); }
+        flags
+    }
 }
diff --git a/kernel/src/syscall/mem.rs b/kernel/src/syscall/mem.rs
index 7ed32f4..8ca9c9c 100644
--- a/kernel/src/syscall/mem.rs
+++ b/kernel/src/syscall/mem.rs
@@ -26,8 +26,7 @@ pub fn sys_mmap(mut addr: usize, len: usize, prot: usize, flags: usize, fd: i32,
         if flags.contains(MmapFlags::SHARED) {
             return Err(SysError::EINVAL);
         }
-        let handler = Delay::new(prot_to_attr(prot), GlobalFrameAlloc);
-        proc.memory_set.push(addr, addr + len, handler, "mmap");
+        proc.memory_set.push(addr, addr + len, prot.to_attr(), Delay::new(GlobalFrameAlloc), "mmap");
         return Ok(addr as isize);
     }
     unimplemented!()
@@ -38,19 +37,14 @@ pub fn sys_mprotect(addr: usize, len: usize, prot: usize) -> SysResult {
     info!("mprotect: addr={:#x}, size={:#x}, prot={:?}", addr, len, prot);
     let mut proc = process();
 
-    let attr = prot_to_attr(prot);
+    let attr = prot.to_attr();
     let memory_area = proc.memory_set.iter().find(|area| area.contains(addr));
     if memory_area.is_some() {
         proc.memory_set.edit(|pt| {
             for page in Page::range_of(addr, addr + len) {
                 let entry = pt.get_entry(page.start_address()).expect("failed to get entry");
-
-                // keep original presence
-                let orig_present = entry.present();
                 attr.apply(entry);
-                entry.set_present(orig_present);
-                entry.update();
             }
         });
         Ok(0)
@@ -90,9 +84,11 @@ bitflags! {
     }
 }
 
-fn prot_to_attr(prot: MmapProt) -> MemoryAttr {
-    let mut attr = MemoryAttr::default().user();
-    if prot.contains(MmapProt::EXEC) { attr = attr.execute(); }
-    if !prot.contains(MmapProt::WRITE) { attr = attr.readonly(); }
-    attr
-}
\ No newline at end of file
+impl MmapProt {
+    fn to_attr(self) -> MemoryAttr {
+        let mut attr = MemoryAttr::default().user();
+        if self.contains(MmapProt::EXEC) { attr = attr.execute(); }
+        if !self.contains(MmapProt::WRITE) { attr = attr.readonly(); }
+        attr
+    }
+}