Compare commits
No commits in common. 'master' and 'dev' have entirely different histories.
@@ -1,8 +0,0 @@
[workspace]
members = [
    "zircon-loader",
    "zircon-object",
    "zircon-syscall",
    "kernel-hal-unix",
    "kernel-hal",
]
@@ -1,19 +0,0 @@
[package]
name = "kernel-hal-unix"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Kernel HAL implementation on Linux and macOS."

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4"
libc = "0.2"
tempfile = "3"
bitflags = "1.2"
lazy_static = "1.4"
kernel-hal = { path = "../kernel-hal" }
async-std = "1.9"
trapframe = "0.8.0"
git-version = "0.3"
@@ -1,363 +0,0 @@
#![feature(asm)]
#![feature(linkage)]
#![deny(warnings)]

extern crate alloc;
#[macro_use]
extern crate log;

use {
    alloc::boxed::Box,
    alloc::collections::VecDeque,
    async_std::task_local,
    core::time::Duration,
    core::{cell::Cell, future::Future, pin::Pin},
    git_version::git_version,
    lazy_static::*,
    std::fmt::{Debug, Formatter},
    std::fs::{File, OpenOptions},
    std::io::Error,
    std::os::unix::io::AsRawFd,
    std::sync::Mutex,
    std::time::SystemTime,
    tempfile::tempdir,
};

use kernel_hal::vdso::*;
pub use kernel_hal::{defs::*, *};
pub use trapframe::syscall_fn_entry as syscall_entry;

#[repr(C)]
pub struct Thread {
    thread: usize,
}

impl Thread {
    #[export_name = "hal_thread_spawn"]
    pub fn spawn(
        future: Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
        _vmtoken: usize,
    ) -> Self {
        async_std::task::spawn(future);
        Thread { thread: 0 }
    }

    #[export_name = "hal_thread_set_tid"]
    pub fn set_tid(tid: u64, pid: u64) {
        TID.with(|x| x.set(tid));
        PID.with(|x| x.set(pid));
    }

    #[export_name = "hal_thread_get_tid"]
    pub fn get_tid() -> (u64, u64) {
        (TID.with(|x| x.get()), PID.with(|x| x.get()))
    }
}

task_local! {
    static TID: Cell<u64> = Cell::new(0);
    static PID: Cell<u64> = Cell::new(0);
}

/// Get current time.
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap()
}

/// Initialize the HAL.
///
/// This function must be called at the beginning.
pub fn init() {
    #[cfg(target_os = "macos")]
    unimplemented!()
}

#[repr(C)]
pub struct PhysFrame {
    paddr: PhysAddr,
}

impl Debug for PhysFrame {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::result::Result<(), std::fmt::Error> {
        write!(f, "PhysFrame({:#x})", self.paddr)
    }
}

const PMEM_SIZE: usize = 0x4000_0000; // 1GiB
const PAGE_SIZE: usize = 0x1000;
fn page_aligned(x: VirtAddr) -> bool {
    x % PAGE_SIZE == 0
}

lazy_static! {
    static ref FRAME_FILE: File = create_pmem_file();
}

fn create_pmem_file() -> File {
    let dir = tempdir().expect("failed to create pmem dir");
    let path = dir.path().join("pmem");

    // workaround on macOS to avoid permission denied.
    // see https://jiege.ch/software/2020/02/07/macos-mmap-exec/ for analysis on this problem.
    #[cfg(target_os = "macos")]
    std::mem::forget(dir);

    let file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(&path)
        .expect("failed to create pmem file");
    file.set_len(PMEM_SIZE as u64)
        .expect("failed to resize file");
    trace!("create pmem file: path={:?}, size={:#x}", path, PMEM_SIZE);
    let prot = libc::PROT_READ | libc::PROT_WRITE;
    mmap(file.as_raw_fd(), 0, PMEM_SIZE, phys_to_virt(0), prot);
    file
}

/// Mmap frame file `fd` to `vaddr`.
fn mmap(fd: libc::c_int, offset: usize, len: usize, vaddr: VirtAddr, prot: libc::c_int) {
    // workaround on macOS to write text section.
    #[cfg(target_os = "macos")]
    let prot = if prot & libc::PROT_EXEC != 0 {
        prot | libc::PROT_WRITE
    } else {
        prot
    };

    let ret = unsafe {
        let flags = libc::MAP_SHARED | libc::MAP_FIXED;
        libc::mmap(vaddr as _, len, prot, flags, fd, offset as _)
    } as usize;
    trace!(
        "mmap file: fd={}, offset={:#x}, len={:#x}, vaddr={:#x}, prot={:#b}",
        fd,
        offset,
        len,
        vaddr,
        prot,
    );
    assert_eq!(ret, vaddr, "failed to mmap: {:?}", Error::last_os_error());
}

lazy_static! {
    static ref AVAILABLE_FRAMES: Mutex<VecDeque<usize>> =
        Mutex::new((PAGE_SIZE..PMEM_SIZE).step_by(PAGE_SIZE).collect());
}

impl PhysFrame {
    #[export_name = "hal_frame_alloc"]
    pub fn alloc() -> Option<Self> {
        let ret = AVAILABLE_FRAMES
            .lock()
            .unwrap()
            .pop_front()
            .map(|paddr| PhysFrame { paddr });
        trace!("frame alloc: {:?}", ret);
        ret
    }
    #[export_name = "hal_zero_frame_paddr"]
    pub fn zero_frame_addr() -> PhysAddr {
        0
    }
}

impl Drop for PhysFrame {
    #[export_name = "hal_frame_dealloc"]
    fn drop(&mut self) {
        trace!("frame dealloc: {:?}", self);
        AVAILABLE_FRAMES.lock().unwrap().push_back(self.paddr);
    }
}

fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
    /// Map physical memory from here.
    const PMEM_BASE: VirtAddr = 0x8_0000_0000;

    PMEM_BASE + paddr
}

/// Ensure physical memory are mmapped and accessible.
fn ensure_mmap_pmem() {
    FRAME_FILE.as_raw_fd();
}

/// Read physical memory from `paddr` to `buf`.
#[export_name = "hal_pmem_read"]
pub fn pmem_read(paddr: PhysAddr, buf: &mut [u8]) {
    trace!("pmem read: paddr={:#x}, len={:#x}", paddr, buf.len());
    assert!(paddr + buf.len() <= PMEM_SIZE);
    ensure_mmap_pmem();
    unsafe {
        (phys_to_virt(paddr) as *const u8).copy_to_nonoverlapping(buf.as_mut_ptr(), buf.len());
    }
}

/// Write physical memory to `paddr` from `buf`.
#[export_name = "hal_pmem_write"]
pub fn pmem_write(paddr: PhysAddr, buf: &[u8]) {
    trace!("pmem write: paddr={:#x}, len={:#x}", paddr, buf.len());
    assert!(paddr + buf.len() <= PMEM_SIZE);
    ensure_mmap_pmem();
    unsafe {
        buf.as_ptr()
            .copy_to_nonoverlapping(phys_to_virt(paddr) as _, buf.len());
    }
}

/// Zero physical memory at `[paddr, paddr + len)`
#[export_name = "hal_pmem_zero"]
pub fn pmem_zero(paddr: PhysAddr, len: usize) {
    trace!("pmem_zero: addr={:#x}, len={:#x}", paddr, len);
    assert!(paddr + len <= PMEM_SIZE);
    ensure_mmap_pmem();
    unsafe {
        core::ptr::write_bytes(phys_to_virt(paddr) as *mut u8, 0, len);
    }
}

/// Copy content of `src` frame to `target` frame
#[export_name = "hal_frame_copy"]
pub fn frame_copy(src: PhysAddr, target: PhysAddr) {
    trace!("frame_copy: {:#x} <- {:#x}", target, src);
    assert!(src + PAGE_SIZE <= PMEM_SIZE && target + PAGE_SIZE <= PMEM_SIZE);
    ensure_mmap_pmem();
    unsafe {
        let buf = phys_to_virt(src) as *const u8;
        buf.copy_to_nonoverlapping(phys_to_virt(target) as _, PAGE_SIZE);
    }
}

/// Flush the physical frame.
#[export_name = "hal_frame_flush"]
pub fn frame_flush(_target: PhysAddr) {
    // do nothing
}

/// Page Table
#[repr(C)]
pub struct PageTable {
    table_phys: PhysAddr,
}

impl PageTable {
    /// Create a new `PageTable`.
    #[allow(clippy::new_without_default)]
    #[export_name = "hal_pt_new"]
    pub fn new() -> Self {
        PageTable { table_phys: 0 }
    }
}

impl PageTableTrait for PageTable {
    /// Map the page of `vaddr` to the frame of `paddr` with `flags`.
    #[export_name = "hal_pt_map"]
    fn map(&mut self, vaddr: VirtAddr, paddr: PhysAddr, flags: MMUFlags) -> Result<()> {
        debug_assert!(page_aligned(vaddr));
        debug_assert!(page_aligned(paddr));
        let prot = flags.to_mmap_prot();
        mmap(FRAME_FILE.as_raw_fd(), paddr, PAGE_SIZE, vaddr, prot);
        Ok(())
    }

    /// Unmap the page of `vaddr`.
    #[export_name = "hal_pt_unmap"]
    fn unmap(&mut self, vaddr: VirtAddr) -> Result<()> {
        self.unmap_cont(vaddr, 1)
    }

    /// Change the `flags` of the page of `vaddr`.
    #[export_name = "hal_pt_protect"]
    fn protect(&mut self, vaddr: VirtAddr, flags: MMUFlags) -> Result<()> {
        debug_assert!(page_aligned(vaddr));
        let prot = flags.to_mmap_prot();
        let ret = unsafe { libc::mprotect(vaddr as _, PAGE_SIZE, prot) };
        assert_eq!(ret, 0, "failed to mprotect: {:?}", Error::last_os_error());
        Ok(())
    }

    /// Query the physical address which the page of `vaddr` maps to.
    #[export_name = "hal_pt_query"]
    fn query(&mut self, vaddr: VirtAddr) -> Result<PhysAddr> {
        debug_assert!(page_aligned(vaddr));
        unimplemented!()
    }

    /// Get the physical address of root page table.
    #[export_name = "hal_pt_table_phys"]
    fn table_phys(&self) -> PhysAddr {
        self.table_phys
    }

    #[export_name = "hal_pt_unmap_cont"]
    fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
        if pages == 0 {
            return Ok(());
        }
        debug_assert!(page_aligned(vaddr));
        let ret = unsafe { libc::munmap(vaddr as _, PAGE_SIZE * pages) };
        assert_eq!(ret, 0, "failed to munmap: {:?}", Error::last_os_error());
        Ok(())
    }
}

trait FlagsExt {
    fn to_mmap_prot(&self) -> libc::c_int;
}

impl FlagsExt for MMUFlags {
    fn to_mmap_prot(&self) -> libc::c_int {
        let mut flags = 0;
        if self.contains(MMUFlags::READ) {
            flags |= libc::PROT_READ;
        }
        if self.contains(MMUFlags::WRITE) {
            flags |= libc::PROT_WRITE;
        }
        if self.contains(MMUFlags::EXECUTE) {
            flags |= libc::PROT_EXEC;
        }
        flags
    }
}

#[export_name = "hal_context_run"]
unsafe fn context_run(context: &mut UserContext) {
    context.run_fncall();
}

#[export_name = "hal_vdso_constants"]
pub fn vdso_constants() -> VdsoConstants {
    let tsc_frequency = 3000u16;
    let mut constants = VdsoConstants {
        max_num_cpus: 1,
        features: Features {
            cpu: 0,
            hw_breakpoint_count: 0,
            hw_watchpoint_count: 0,
        },
        dcache_line_size: 0,
        icache_line_size: 0,
        ticks_per_second: tsc_frequency as u64 * 1_000_000,
        ticks_to_mono_numerator: 1000,
        ticks_to_mono_denominator: tsc_frequency as u32,
        physmem: PMEM_SIZE as u64,
        version_string_len: 0,
        version_string: Default::default(),
    };
    constants.set_version_string(git_version!(
        prefix = "git-",
        args = ["--always", "--abbrev=40", "--dirty=-dirty"]
    ));
    constants
}

/// Output a char to console.
#[export_name = "hal_serial_write"]
pub fn serial_write(s: &str) {
    eprint!("{}", s);
}
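An aside, not part of the diff above: because kernel-hal-unix emulates physical memory with a mmapped temporary file, the pmem helpers can be exercised directly from a host test. A minimal sketch (hypothetical test module, assuming it is compiled inside this crate):

#[cfg(test)]
mod pmem_round_trip {
    use super::*;

    #[test]
    fn write_then_read() {
        // Take a real frame from the allocator; frame 0 is reserved as the zero frame.
        let frame = PhysFrame::alloc().expect("out of frames");
        let src = [0xAAu8; 4];
        let mut dst = [0u8; 4];
        pmem_write(frame.paddr, &src); // copies into the mmapped frame file
        pmem_read(frame.paddr, &mut dst); // reads back through the same mapping
        assert_eq!(src, dst);
    }
}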
@@ -1,13 +0,0 @@
[package]
name = "kernel-hal"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Kernel HAL interface definations."

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bitflags = "1.2"
trapframe = "0.8.0"
numeric-enum-macro = "0.2"
@@ -1,305 +0,0 @@
use super::*;
use crate::vdso::VdsoConstants;
use alloc::boxed::Box;
use alloc::vec::Vec;
use core::future::Future;
use core::pin::Pin;
use core::time::Duration;

#[derive(Debug)]
pub struct HalError;
/// The result type returned by HAL functions.
pub type Result<T> = core::result::Result<T, HalError>;

#[repr(C)]
pub struct Thread {
    id: usize,
}

impl Thread {
    /// Spawn a new thread.
    #[linkage = "weak"]
    #[export_name = "hal_thread_spawn"]
    pub fn spawn(
        _future: Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
        _vmtoken: usize,
    ) -> Self {
        unimplemented!()
    }

    /// Set tid and pid of current task.
    #[linkage = "weak"]
    #[export_name = "hal_thread_set_tid"]
    pub fn set_tid(_tid: u64, _pid: u64) {
        unimplemented!()
    }

    /// Get tid and pid of current task.
    #[linkage = "weak"]
    #[export_name = "hal_thread_get_tid"]
    pub fn get_tid() -> (u64, u64) {
        unimplemented!()
    }
}

#[linkage = "weak"]
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
    unimplemented!()
}

#[repr(C)]
pub struct PhysFrame {
    paddr: PhysAddr,
}

impl PhysFrame {
    #[linkage = "weak"]
    #[export_name = "hal_frame_alloc"]
    pub fn alloc() -> Option<Self> {
        unimplemented!()
    }

    #[linkage = "weak"]
    #[export_name = "hal_frame_alloc_contiguous"]
    pub fn alloc_contiguous_base(_size: usize, _align_log2: usize) -> Option<PhysAddr> {
        unimplemented!()
    }

    pub fn alloc_contiguous(size: usize, align_log2: usize) -> Vec<Self> {
        PhysFrame::alloc_contiguous_base(size, align_log2).map_or(Vec::new(), |base| {
            (0..size)
                .map(|i| PhysFrame {
                    paddr: base + i * PAGE_SIZE,
                })
                .collect()
        })
    }

    pub fn alloc_zeroed() -> Option<Self> {
        Self::alloc().map(|f| {
            pmem_zero(f.addr(), PAGE_SIZE);
            f
        })
    }

    pub fn alloc_contiguous_zeroed(size: usize, align_log2: usize) -> Vec<Self> {
        PhysFrame::alloc_contiguous_base(size, align_log2).map_or(Vec::new(), |base| {
            pmem_zero(base, size * PAGE_SIZE);
            (0..size)
                .map(|i| PhysFrame {
                    paddr: base + i * PAGE_SIZE,
                })
                .collect()
        })
    }

    pub fn addr(&self) -> PhysAddr {
        self.paddr
    }

    #[linkage = "weak"]
    #[export_name = "hal_zero_frame_paddr"]
    pub fn zero_frame_addr() -> PhysAddr {
        unimplemented!()
    }
}

impl Drop for PhysFrame {
    #[linkage = "weak"]
    #[export_name = "hal_frame_dealloc"]
    fn drop(&mut self) {
        unimplemented!()
    }
}

/// Read physical memory from `paddr` to `buf`.
#[linkage = "weak"]
#[export_name = "hal_pmem_read"]
pub fn pmem_read(_paddr: PhysAddr, _buf: &mut [u8]) {
    unimplemented!()
}

/// Write physical memory to `paddr` from `buf`.
#[linkage = "weak"]
#[export_name = "hal_pmem_write"]
pub fn pmem_write(_paddr: PhysAddr, _buf: &[u8]) {
    unimplemented!()
}

/// Zero physical memory at `[paddr, paddr + len)`
#[linkage = "weak"]
#[export_name = "hal_pmem_zero"]
pub fn pmem_zero(_paddr: PhysAddr, _len: usize) {
    unimplemented!()
}

/// Copy content of `src` frame to `target` frame.
#[linkage = "weak"]
#[export_name = "hal_frame_copy"]
pub fn frame_copy(_src: PhysAddr, _target: PhysAddr) {
    unimplemented!()
}

/// Flush the physical frame.
#[linkage = "weak"]
#[export_name = "hal_frame_flush"]
pub fn frame_flush(_target: PhysAddr) {
    unimplemented!()
}

pub trait PageTableTrait: Sync + Send {
    /// Map the page of `vaddr` to the frame of `paddr` with `flags`.
    fn map(&mut self, _vaddr: VirtAddr, _paddr: PhysAddr, _flags: MMUFlags) -> Result<()>;

    /// Unmap the page of `vaddr`.
    fn unmap(&mut self, _vaddr: VirtAddr) -> Result<()>;

    /// Change the `flags` of the page of `vaddr`.
    fn protect(&mut self, _vaddr: VirtAddr, _flags: MMUFlags) -> Result<()>;

    /// Query the physical address which the page of `vaddr` maps to.
    fn query(&mut self, _vaddr: VirtAddr) -> Result<PhysAddr>;

    /// Get the physical address of root page table.
    fn table_phys(&self) -> PhysAddr;

    #[cfg(target_arch = "riscv64")]
    /// Activate this page table
    fn activate(&self);

    fn map_many(
        &mut self,
        mut vaddr: VirtAddr,
        paddrs: &[PhysAddr],
        flags: MMUFlags,
    ) -> Result<()> {
        for &paddr in paddrs {
            self.map(vaddr, paddr, flags)?;
            vaddr += PAGE_SIZE;
        }
        Ok(())
    }

    fn map_cont(
        &mut self,
        mut vaddr: VirtAddr,
        paddr: PhysAddr,
        pages: usize,
        flags: MMUFlags,
    ) -> Result<()> {
        for i in 0..pages {
            let paddr = paddr + i * PAGE_SIZE;
            self.map(vaddr, paddr, flags)?;
            vaddr += PAGE_SIZE;
        }
        Ok(())
    }

    fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
        for i in 0..pages {
            self.unmap(vaddr + i * PAGE_SIZE)?;
        }
        Ok(())
    }
}

/// Page Table
#[repr(C)]
pub struct PageTable {
    table_phys: PhysAddr,
}

impl PageTable {
    /// Get current page table
    #[linkage = "weak"]
    #[export_name = "hal_pt_current"]
    pub fn current() -> Self {
        unimplemented!()
    }

    /// Create a new `PageTable`.
    #[allow(clippy::new_without_default)]
    #[linkage = "weak"]
    #[export_name = "hal_pt_new"]
    pub fn new() -> Self {
        unimplemented!()
    }
}

impl PageTableTrait for PageTable {
    /// Map the page of `vaddr` to the frame of `paddr` with `flags`.
    #[linkage = "weak"]
    #[export_name = "hal_pt_map"]
    fn map(&mut self, _vaddr: VirtAddr, _paddr: PhysAddr, _flags: MMUFlags) -> Result<()> {
        unimplemented!()
    }
    /// Unmap the page of `vaddr`.
    #[linkage = "weak"]
    #[export_name = "hal_pt_unmap"]
    fn unmap(&mut self, _vaddr: VirtAddr) -> Result<()> {
        unimplemented!()
    }
    /// Change the `flags` of the page of `vaddr`.
    #[linkage = "weak"]
    #[export_name = "hal_pt_protect"]
    fn protect(&mut self, _vaddr: VirtAddr, _flags: MMUFlags) -> Result<()> {
        unimplemented!()
    }
    /// Query the physical address which the page of `vaddr` maps to.
    #[linkage = "weak"]
    #[export_name = "hal_pt_query"]
    fn query(&mut self, _vaddr: VirtAddr) -> Result<PhysAddr> {
        unimplemented!()
    }
    /// Get the physical address of root page table.
    #[linkage = "weak"]
    #[export_name = "hal_pt_table_phys"]
    fn table_phys(&self) -> PhysAddr {
        self.table_phys
    }

    /// Activate this page table
    #[cfg(target_arch = "riscv64")]
    #[linkage = "weak"]
    #[export_name = "hal_pt_activate"]
    fn activate(&self) {
        unimplemented!()
    }

    #[linkage = "weak"]
    #[export_name = "hal_pt_unmap_cont"]
    fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
        for i in 0..pages {
            self.unmap(vaddr + i * PAGE_SIZE)?;
        }
        Ok(())
    }
}

#[linkage = "weak"]
#[export_name = "hal_context_run"]
pub fn context_run(_context: &mut UserContext) {
    unimplemented!()
}

/// Get platform specific information.
#[linkage = "weak"]
#[export_name = "hal_vdso_constants"]
pub fn vdso_constants() -> VdsoConstants {
    unimplemented!()
}

/// Read a string from console.
#[linkage = "weak"]
#[export_name = "hal_serial_read"]
pub fn serial_read(_buf: &mut [u8]) -> usize {
    unimplemented!()
}

/// Output a string to console.
#[linkage = "weak"]
#[export_name = "hal_serial_write"]
pub fn serial_write(_s: &str) {
    unimplemented!()
}
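An aside, not part of the diff: the stubs above rely on weak linkage. Each carries #[linkage = "weak"] plus a fixed #[export_name]; when an implementation crate such as kernel-hal-unix exports a strong symbol under the same name, the linker picks the strong definition and the stub is never called. Shown side by side for one symbol (in the tree these live in separate crates):

// kernel-hal (interface crate): weak fallback, panics if nothing overrides it.
#[linkage = "weak"]
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
    unimplemented!()
}

// kernel-hal-unix (implementation crate): strong definition under the same
// symbol name, chosen by the linker over the weak stub.
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap()
}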
@@ -1,56 +0,0 @@
//! Hardware Abstraction Layer

#![no_std]
#![feature(linkage)]
#![deny(warnings)]

extern crate alloc;

pub mod defs {
    use bitflags::bitflags;
    use numeric_enum_macro::numeric_enum;

    bitflags! {
        pub struct MMUFlags: usize {
            #[allow(clippy::identity_op)]
            const CACHE_1 = 1 << 0;
            const CACHE_2 = 1 << 1;
            const READ = 1 << 2;
            const WRITE = 1 << 3;
            const EXECUTE = 1 << 4;
            const USER = 1 << 5;
            const RXW = Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits;
        }
    }
    numeric_enum! {
        #[repr(u32)]
        #[derive(Debug, PartialEq, Clone, Copy)]
        pub enum CachePolicy {
            Cached = 0,
            Uncached = 1,
            UncachedDevice = 2,
            WriteCombining = 3,
        }
    }

    impl Default for CachePolicy {
        fn default() -> Self {
            CachePolicy::Cached
        }
    }

    pub const CACHE_POLICY_MASK: u32 = 3;

    pub type PhysAddr = usize;
    pub type VirtAddr = usize;
    pub type DevVAddr = usize;
    pub const PAGE_SIZE: usize = 0x1000;
}

mod dummy;
pub mod user;
pub mod vdso;

pub use self::defs::*;
pub use self::dummy::*;
pub use trapframe::{GeneralRegs, UserContext};
@@ -1,197 +0,0 @@
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt::{Debug, Formatter};
use core::marker::PhantomData;

#[repr(C)]
pub struct UserPtr<T, P: Policy> {
    ptr: *mut T,
    mark: PhantomData<P>,
}

pub trait Policy {}
pub trait Read: Policy {}
pub trait Write: Policy {}
pub enum In {}
pub enum Out {}
pub enum InOut {}

impl Policy for In {}
impl Policy for Out {}
impl Policy for InOut {}
impl Read for In {}
impl Write for Out {}
impl Read for InOut {}
impl Write for InOut {}

pub type UserInPtr<T> = UserPtr<T, In>;
pub type UserOutPtr<T> = UserPtr<T, Out>;
pub type UserInOutPtr<T> = UserPtr<T, InOut>;

type Result<T> = core::result::Result<T, Error>;

/// The error type which is returned from user pointer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Error {
    InvalidUtf8,
    InvalidPointer,
    BufferTooSmall,
    InvalidLength,
    InvalidVectorAddress,
}

impl<T, P: Policy> Debug for UserPtr<T, P> {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "{:?}", self.ptr)
    }
}

// FIXME: this is a workaround for `clear_child_tid`.
unsafe impl<T, P: Policy> Send for UserPtr<T, P> {}
unsafe impl<T, P: Policy> Sync for UserPtr<T, P> {}

impl<T, P: Policy> From<usize> for UserPtr<T, P> {
    fn from(x: usize) -> Self {
        UserPtr {
            ptr: x as _,
            mark: PhantomData,
        }
    }
}

impl<T, P: Policy> UserPtr<T, P> {
    pub fn from_addr_size(addr: usize, size: usize) -> Result<Self> {
        if size < core::mem::size_of::<T>() {
            return Err(Error::BufferTooSmall);
        }
        Ok(Self::from(addr))
    }

    pub fn is_null(&self) -> bool {
        self.ptr.is_null()
    }

    pub fn add(&self, count: usize) -> Self {
        UserPtr {
            ptr: unsafe { self.ptr.add(count) },
            mark: PhantomData,
        }
    }

    pub fn as_ptr(&self) -> *mut T {
        self.ptr
    }

    pub fn check(&self) -> Result<()> {
        if self.ptr.is_null() {
            return Err(Error::InvalidPointer);
        }
        if (self.ptr as usize) % core::mem::align_of::<T>() != 0 {
            return Err(Error::InvalidPointer);
        }
        Ok(())
    }
}

impl<T, P: Read> UserPtr<T, P> {
    pub fn as_ref(&self) -> Result<&'static T> {
        Ok(unsafe { &*self.ptr })
    }

    pub fn read(&self) -> Result<T> {
        // TODO: check ptr and return err
        self.check()?;
        Ok(unsafe { self.ptr.read() })
    }

    pub fn read_if_not_null(&self) -> Result<Option<T>> {
        if self.ptr.is_null() {
            return Ok(None);
        }
        let value = self.read()?;
        Ok(Some(value))
    }

    pub fn read_array(&self, len: usize) -> Result<Vec<T>> {
        if len == 0 {
            return Ok(Vec::default());
        }
        self.check()?;
        let mut ret = Vec::<T>::with_capacity(len);
        unsafe {
            ret.set_len(len);
            ret.as_mut_ptr().copy_from_nonoverlapping(self.ptr, len);
        }
        Ok(ret)
    }
}

impl<P: Read> UserPtr<u8, P> {
    pub fn read_string(&self, len: usize) -> Result<String> {
        self.check()?;
        let src = unsafe { core::slice::from_raw_parts(self.ptr, len) };
        let s = core::str::from_utf8(src).map_err(|_| Error::InvalidUtf8)?;
        Ok(String::from(s))
    }

    pub fn read_cstring(&self) -> Result<String> {
        self.check()?;
        let len = unsafe { (0usize..).find(|&i| *self.ptr.add(i) == 0).unwrap() };
        self.read_string(len)
    }
}

impl<P: Read> UserPtr<UserPtr<u8, P>, P> {
    pub fn read_cstring_array(&self) -> Result<Vec<String>> {
        self.check()?;
        let len = unsafe {
            (0usize..)
                .find(|&i| self.ptr.add(i).read().is_null())
                .unwrap()
        };
        self.read_array(len)?
            .into_iter()
            .map(|ptr| ptr.read_cstring())
            .collect()
    }
}

impl<T, P: Write> UserPtr<T, P> {
    pub fn write(&mut self, value: T) -> Result<()> {
        self.check()?;
        unsafe {
            self.ptr.write(value);
        }
        Ok(())
    }

    pub fn write_if_not_null(&mut self, value: T) -> Result<()> {
        if self.ptr.is_null() {
            return Ok(());
        }
        self.write(value)
    }

    pub fn write_array(&mut self, values: &[T]) -> Result<()> {
        if values.is_empty() {
            return Ok(());
        }
        self.check()?;
        unsafe {
            self.ptr
                .copy_from_nonoverlapping(values.as_ptr(), values.len());
        }
        Ok(())
    }
}

impl<P: Write> UserPtr<u8, P> {
    pub fn write_cstring(&mut self, s: &str) -> Result<()> {
        let bytes = s.as_bytes();
        self.write_array(bytes)?;
        unsafe {
            self.ptr.add(bytes.len()).write(0);
        }
        Ok(())
    }
}
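An aside, not part of the diff: the Policy marker types above make misuse a compile error, since the read_* methods exist only for P: Read and the write_* methods only for P: Write. A small hypothetical handler showing the intended call pattern (addresses are assumed to be valid user addresses in the identity-mapped LibOS setup):

fn copy_name(in_addr: usize, out_addr: usize) -> Result<()> {
    let name_ptr: UserInPtr<u8> = in_addr.into();
    let mut reply_ptr: UserOutPtr<u8> = out_addr.into();
    let name = name_ptr.read_cstring()?; // checked, NUL-terminated read
    reply_ptr.write_cstring(&name)?; // writes the bytes plus a trailing NUL
    Ok(())
}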
@@ -1,71 +0,0 @@
use core::fmt::{Debug, Error, Formatter};

/// This struct contains constants that are initialized by the kernel
/// once at boot time. From the vDSO code's perspective, they are
/// read-only data that can never change. Hence, no synchronization is
/// required to read them.
#[repr(C)]
#[derive(Debug)]
pub struct VdsoConstants {
    /// Maximum number of CPUs that might be online during the lifetime
    /// of the booted system.
    pub max_num_cpus: u32,
    /// Bit map indicating features.
    pub features: Features,
    /// Number of bytes in a data cache line.
    pub dcache_line_size: u32,
    /// Number of bytes in an instruction cache line.
    pub icache_line_size: u32,
    /// Conversion factor for zx_ticks_get return values to seconds.
    pub ticks_per_second: u64,
    /// Ratio which relates ticks (zx_ticks_get) to clock monotonic.
    ///
    /// Specifically: ClockMono(ticks) = (ticks * N) / D
    pub ticks_to_mono_numerator: u32,
    pub ticks_to_mono_denominator: u32,
    /// Total amount of physical memory in the system, in bytes.
    pub physmem: u64,
    /// Actual length of `version_string`, not including the NUL terminator.
    pub version_string_len: u64,
    /// A NUL-terminated UTF-8 string returned by `zx_system_get_version_string`.
    pub version_string: VersionString,
}

/// Bit map indicating features.
///
/// For specific feature bits, see zircon/features.h.
#[repr(C)]
#[derive(Debug)]
pub struct Features {
    pub cpu: u32,
    /// Total amount of debug registers available in the system.
    pub hw_breakpoint_count: u32,
    pub hw_watchpoint_count: u32,
}

impl VdsoConstants {
    /// Set version string.
    pub fn set_version_string(&mut self, s: &str) {
        let len = s.len().min(64);
        self.version_string_len = len as u64;
        self.version_string.0[..len].copy_from_slice(s.as_bytes());
    }
}

#[repr(C)]
pub struct VersionString([u8; 64]);

impl Default for VersionString {
    fn default() -> Self {
        VersionString([0; 64])
    }
}

impl Debug for VersionString {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
        for &c in self.0.iter().take_while(|&&c| c != 0) {
            write!(f, "{}", c as char)?;
        }
        Ok(())
    }
}
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,23 +0,0 @@
[package]
name = "zircon-loader"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Zircon user program (userboot) loader"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4"
xmas-elf = "0.7"
zircon-object = { path = "../zircon-object" }
zircon-syscall = { path = "../zircon-syscall" }
kernel-hal = { path = "../kernel-hal" }
env_logger = { version = "0.8", optional = true }
structopt = { version = "0.3", default-features = false, optional = true }
kernel-hal-unix = { path = "../kernel-hal-unix" }
async-std = { version = "1.9", features = ["attributes"], optional = true }

[features]
default = ["std"]
std = ["env_logger", "structopt", "async-std"]
@@ -1,54 +0,0 @@
use {
    alloc::sync::Arc,
    core::mem::size_of,
    zircon_object::{object::KernelObject, vm::*},
};

pub fn create_kcounter_vmo() -> (Arc<VmObject>, Arc<VmObject>) {
    const HEADER_SIZE: usize = size_of::<KCounterVmoHeader>();
    let counter_name_vmo = VmObject::new_paged(1);
    let header = KCounterVmoHeader {
        magic: KCOUNTER_MAGIC,
        max_cpu: 1,
        counter_table_size: 0,
    };
    let serde_header: [u8; HEADER_SIZE] = unsafe { core::mem::transmute(header) };
    counter_name_vmo.write(0, &serde_header).unwrap();
    counter_name_vmo.set_name("counters/desc");

    let kcounters_vmo = VmObject::new_paged(1);
    kcounters_vmo.set_name("counters/arena");
    (counter_name_vmo, kcounters_vmo)
}

// #[repr(C)]
// struct KCounterDescItem {
//     name: [u8; 56],
//     type_: KCounterType,
// }

// #[repr(u64)]
// enum KCounterType {
//     Sum = 1,
// }

// impl From<&KCounterDescriptor> for KCounterDescItem {
//     fn from(desc: &KCounterDescriptor) -> Self {
//         let mut name = [0u8; 56];
//         let length = desc.name.len().min(56);
//         name[..length].copy_from_slice(&desc.name.as_bytes()[..length]);
//         KCounterDescItem {
//             name,
//             type_: KCounterType::Sum,
//         }
//     }
// }

#[repr(C)]
struct KCounterVmoHeader {
    magic: u64,
    max_cpu: u64,
    counter_table_size: usize,
}

const KCOUNTER_MAGIC: u64 = 1_547_273_975;
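An aside, not part of the diff: the transmute above assumes KCounterVmoHeader is laid out as three consecutive 8-byte fields (24 bytes on x86_64). A compile-time check would document that assumption; sketched here, assuming a Rust version where assert! is usable in const context:

const _: () = assert!(core::mem::size_of::<KCounterVmoHeader>() == 24);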
@@ -1,221 +0,0 @@
#![no_std]
#![feature(asm)]
#![deny(warnings, unused_must_use)]

#[macro_use]
extern crate alloc;
#[macro_use]
extern crate log;

use {
    alloc::{boxed::Box, sync::Arc, vec::Vec},
    core::{future::Future, pin::Pin},
    kernel_hal::MMUFlags,
    xmas_elf::ElfFile,
    zircon_object::{dev::*, ipc::*, object::*, task::*, util::elf_loader::*, vm::*},
    zircon_syscall::Syscall,
};

mod kcounter;

// These describe userboot itself
const K_PROC_SELF: usize = 0;
const K_VMARROOT_SELF: usize = 1;
// Essential job and resource handles
const K_ROOTJOB: usize = 2;
const K_ROOTRESOURCE: usize = 3;
// Essential VMO handles
const K_ZBI: usize = 4;
const K_FIRSTVDSO: usize = 5;
const K_CRASHLOG: usize = 8;
const K_COUNTERNAMES: usize = 9;
const K_COUNTERS: usize = 10;
const K_FISTINSTRUMENTATIONDATA: usize = 11;
const K_HANDLECOUNT: usize = 15;

/// Program images to run.
pub struct Images<T: AsRef<[u8]>> {
    pub userboot: T,
    pub vdso: T,
    pub zbi: T,
}

pub fn run_userboot(images: &Images<impl AsRef<[u8]>>, cmdline: &str) -> Arc<Process> {
    let job = Job::root();
    let proc = Process::create(&job, "userboot").unwrap();
    let thread = Thread::create(&proc, "userboot").unwrap();
    let resource = Resource::create(
        "root",
        ResourceKind::ROOT,
        0,
        0x1_0000_0000,
        ResourceFlags::empty(),
    );
    let vmar = proc.vmar();

    // userboot
    let (entry, userboot_size) = {
        let elf = ElfFile::new(images.userboot.as_ref()).unwrap();
        let size = elf.load_segment_size();
        let vmar = vmar
            .allocate(None, size, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
            .unwrap();
        vmar.load_from_elf(&elf).unwrap();
        (vmar.addr() + elf.header.pt2.entry_point() as usize, size)
    };

    // vdso
    let vdso_vmo = {
        let elf = ElfFile::new(images.vdso.as_ref()).unwrap();
        let vdso_vmo = VmObject::new_paged(images.vdso.as_ref().len() / PAGE_SIZE + 1);
        vdso_vmo.write(0, images.vdso.as_ref()).unwrap();
        let size = elf.load_segment_size();
        let vmar = vmar
            .allocate_at(
                userboot_size,
                size,
                VmarFlags::CAN_MAP_RXW | VmarFlags::SPECIFIC,
                PAGE_SIZE,
            )
            .unwrap();
        vmar.map_from_elf(&elf, vdso_vmo.clone()).unwrap();
        let offset = elf
            .get_symbol_address("zcore_syscall_entry")
            .expect("failed to locate syscall entry") as usize;
        let syscall_entry = &(kernel_hal_unix::syscall_entry as usize).to_ne_bytes();
        // fill syscall entry x3
        vdso_vmo.write(offset, syscall_entry).unwrap();
        vdso_vmo.write(offset + 8, syscall_entry).unwrap();
        vdso_vmo.write(offset + 16, syscall_entry).unwrap();
        vdso_vmo
    };

    // zbi
    let zbi_vmo = {
        let vmo = VmObject::new_paged(images.zbi.as_ref().len() / PAGE_SIZE + 1);
        vmo.write(0, images.zbi.as_ref()).unwrap();
        vmo.set_name("zbi");
        vmo
    };

    // stack
    const STACK_PAGES: usize = 8;
    let stack_vmo = VmObject::new_paged(STACK_PAGES);
    let flags = MMUFlags::READ | MMUFlags::WRITE | MMUFlags::USER;
    let stack_bottom = vmar
        .map(None, stack_vmo.clone(), 0, stack_vmo.len(), flags)
        .unwrap();
    // WARN: align stack to 16B, then emulate a 'call' (push rip)
    let sp = stack_bottom + stack_vmo.len() - 8;

    // channel
    let (user_channel, kernel_channel) = Channel::create();
    let handle = Handle::new(user_channel, Rights::DEFAULT_CHANNEL);

    let mut handles = vec![Handle::new(proc.clone(), Rights::empty()); K_HANDLECOUNT];
    handles[K_PROC_SELF] = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
    handles[K_VMARROOT_SELF] = Handle::new(proc.vmar(), Rights::DEFAULT_VMAR | Rights::IO);
    handles[K_ROOTJOB] = Handle::new(job, Rights::DEFAULT_JOB);
    handles[K_ROOTRESOURCE] = Handle::new(resource, Rights::DEFAULT_RESOURCE);
    handles[K_ZBI] = Handle::new(zbi_vmo, Rights::DEFAULT_VMO);
    // set up handles[K_FIRSTVDSO..K_LASTVDSO + 1]
    const VDSO_DATA_CONSTANTS: usize = 0x4a50;
    const VDSO_DATA_CONSTANTS_SIZE: usize = 0x78;
    let constants: [u8; VDSO_DATA_CONSTANTS_SIZE] =
        unsafe { core::mem::transmute(kernel_hal::vdso_constants()) };
    vdso_vmo.write(VDSO_DATA_CONSTANTS, &constants).unwrap();
    vdso_vmo.set_name("vdso/full");
    let vdso_test1 = vdso_vmo.create_child(false, 0, vdso_vmo.len()).unwrap();
    vdso_test1.set_name("vdso/test1");
    let vdso_test2 = vdso_vmo.create_child(false, 0, vdso_vmo.len()).unwrap();
    vdso_test2.set_name("vdso/test2");
    handles[K_FIRSTVDSO] = Handle::new(vdso_vmo, Rights::DEFAULT_VMO | Rights::EXECUTE);
    handles[K_FIRSTVDSO + 1] = Handle::new(vdso_test1, Rights::DEFAULT_VMO | Rights::EXECUTE);
    handles[K_FIRSTVDSO + 2] = Handle::new(vdso_test2, Rights::DEFAULT_VMO | Rights::EXECUTE);
    // TODO: use correct CrashLogVmo handle
    let crash_log_vmo = VmObject::new_paged(1);
    crash_log_vmo.set_name("crashlog");
    handles[K_CRASHLOG] = Handle::new(crash_log_vmo, Rights::DEFAULT_VMO);
    let (counter_name_vmo, kcounters_vmo) = kcounter::create_kcounter_vmo();
    handles[K_COUNTERNAMES] = Handle::new(counter_name_vmo, Rights::DEFAULT_VMO);
    handles[K_COUNTERS] = Handle::new(kcounters_vmo, Rights::DEFAULT_VMO);
    // TODO: use correct Instrumentation data handle
    let instrumentation_data_vmo = VmObject::new_paged(0);
    instrumentation_data_vmo.set_name("UNIMPLEMENTED_VMO");
    handles[K_FISTINSTRUMENTATIONDATA] =
        Handle::new(instrumentation_data_vmo.clone(), Rights::DEFAULT_VMO);
    handles[K_FISTINSTRUMENTATIONDATA + 1] =
        Handle::new(instrumentation_data_vmo.clone(), Rights::DEFAULT_VMO);
    handles[K_FISTINSTRUMENTATIONDATA + 2] =
        Handle::new(instrumentation_data_vmo.clone(), Rights::DEFAULT_VMO);
    handles[K_FISTINSTRUMENTATIONDATA + 3] =
        Handle::new(instrumentation_data_vmo, Rights::DEFAULT_VMO);

    // check: handle to root proc should be only
    let data = Vec::from(cmdline.replace(':', "\0") + "\0");
    let msg = MessagePacket { data, handles };
    kernel_channel.write(msg).unwrap();

    proc.start(&thread, entry, sp, Some(handle), 0, thread_fn)
        .expect("failed to start main thread");
    proc
}

async fn new_thread(thread: CurrentThread) {
    kernel_hal::Thread::set_tid(thread.id(), thread.proc().id());

    loop {
        let mut cx = thread.wait_for_run().await;
        if thread.state() == ThreadState::Dying {
            break;
        }
        trace!("go to user: {:#x?}", cx);
        debug!("switch to {}|{}", thread.proc().name(), thread.name());
        let tmp_time = kernel_hal::timer_now().as_nanos();
        // * Attention
        // The code will enter a magic zone from here.
        // `context run` will be executed into a wrapped library where context switching takes place.
        // The details are available in the trapframe crate on crates.io.
        kernel_hal::context_run(&mut cx);
        // Back from the userspace
        let time = kernel_hal::timer_now().as_nanos() - tmp_time;
        thread.time_add(time);
        trace!("back from user: {:#x?}", cx);
        let trap_num = cx.trap_num;
        let _error_code = cx.error_code;
        thread.end_running(cx);
        match trap_num {
            0x100 => handle_syscall(&thread).await,
            n => panic!("Unsupprted exception {:x}", n),
        }
    }
}

fn thread_fn(thread: CurrentThread) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> {
    Box::pin(new_thread(thread))
}

async fn handle_syscall(thread: &CurrentThread) {
    let (num, args) = thread.with_context(|cx| {
        let regs = cx.general;
        let num = regs.rax as u32;
        // LibOS: Function call ABI
        let args = unsafe {
            let a6 = (regs.rsp as *const usize).read();
            let a7 = (regs.rsp as *const usize).add(1).read();
            [
                regs.rdi, regs.rsi, regs.rdx, regs.rcx, regs.r8, regs.r9, a6, a7,
            ]
        };
        // RealOS: Zircon syscall ABI
        // let args = [
        //     regs.rdi, regs.rsi, regs.rdx, regs.r10, regs.r8, regs.r9, regs.r12, regs.r13,
        // ];
        (num, args)
    });
    let mut syscall = Syscall { thread, thread_fn };
    let ret = syscall.syscall(num, args).await as usize;
    thread.with_context(|cx| {
        cx.general.rax = ret;
    });
}
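An aside, not part of the diff: under the LibOS function-call ABI that handle_syscall decodes above, user code reaches the kernel through the zcore_syscall_entry pointer patched into the vDSO by run_userboot; the syscall number arrives in rax, the first six arguments follow the System V register order, and arguments 7 and 8 are read from the user stack. A sketch of the register part of that decoding (illustrative helper, mirroring the layout read above):

fn decode_libos_regs(regs: &kernel_hal::GeneralRegs) -> (u32, [usize; 6]) {
    // Arguments 7 and 8 (not shown here) would be loaded from regs.rsp and rsp + 8.
    (
        regs.rax as u32,
        [regs.rdi, regs.rsi, regs.rdx, regs.rcx, regs.r8, regs.r9],
    )
}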
@@ -1,85 +0,0 @@
#![deny(warnings, unused_must_use)]

extern crate log;

use std::path::{Path, PathBuf};
use std::sync::Arc;
use structopt::StructOpt;
use zircon_loader::*;
use zircon_object::object::*;
use zircon_object::task::Process;

#[derive(Debug, StructOpt)]
#[structopt()]
struct Opt {
    #[structopt(parse(from_os_str))]
    prebuilt_path: PathBuf,
    #[structopt(default_value = "")]
    cmdline: String,
}

#[async_std::main]
async fn main() {
    kernel_hal_unix::init();
    init_logger();
    let opt = Opt::from_args();
    let images = open_images(&opt.prebuilt_path).expect("failed to read file");
    let proc: Arc<dyn KernelObject> = run_userboot(&images, &opt.cmdline);
    drop(images);
    let proc = proc.downcast_arc::<Process>().unwrap();
    proc.wait_for_end().await;
}

fn open_images(path: &Path) -> std::io::Result<Images<Vec<u8>>> {
    Ok(Images {
        userboot: std::fs::read(path.join("userboot-libos.so"))?,
        vdso: std::fs::read(path.join("libzircon-libos.so"))?,
        zbi: std::fs::read(path.join("bringup.zbi"))?,
    })
}

fn init_logger() {
    env_logger::builder()
        .format(|buf, record| {
            use env_logger::fmt::Color;
            use log::Level;
            use std::io::Write;

            let (tid, pid) = kernel_hal::Thread::get_tid();
            let mut style = buf.style();
            match record.level() {
                Level::Trace => style.set_color(Color::Black).set_intense(true),
                Level::Debug => style.set_color(Color::White),
                Level::Info => style.set_color(Color::Green),
                Level::Warn => style.set_color(Color::Yellow),
                Level::Error => style.set_color(Color::Red).set_bold(true),
            };
            let now = kernel_hal_unix::timer_now();
            let level = style.value(record.level());
            let args = record.args();
            writeln!(buf, "[{:?} {:>5} {}:{}] {}", now, level, pid, tid, args)
        })
        .init();
}

// #[cfg(test)]
// mod tests {
//     use super::*;

//     #[async_std::test]
//     async fn userboot() {
//         kernel_hal_unix::init();

//         let opt = Opt {
//             prebuilt_path: PathBuf::from("../prebuilt/zircon/x64"),
//             cmdline: String::from(""),
//         };
//         let images = open_images(&opt.prebuilt_path).expect("failed to read file");

//         let proc: Arc<dyn KernelObject> = run_userboot(&images, &opt.cmdline);
//         drop(images);

//         let proc = proc.downcast_arc::<Process>().unwrap();
//         proc.wait_for_end().await;
//     }
// }
@@ -1,22 +0,0 @@
[package]
name = "zircon-object"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4"
spin = "0.7"
downcast-rs = { version = "1.2.0", default-features = false }
bitflags = "1.2"
hashbrown = "0.9"
trapframe = "0.8.0"
futures = { version = "0.3", default-features = false, features = ["alloc", "async-await"] }
async-std = { version = "1.9", features = ["attributes", "unstable"] }
numeric-enum-macro = "0.2"
xmas-elf = { version = "0.7"}
kernel-hal = { path = "../kernel-hal" }
kernel-hal-unix = { path = "../kernel-hal-unix" }
lazy_static = "1.4"
@@ -1,127 +0,0 @@
//! Objects for Kernel Debuglog.
use {
    super::*,
    crate::object::*,
    alloc::{sync::Arc, vec::Vec},
    kernel_hal::timer_now,
    lazy_static::lazy_static,
    spin::Mutex,
};

lazy_static! {
    static ref DLOG: Mutex<DlogBuffer> = Mutex::new(DlogBuffer {
        buf: Vec::with_capacity(0x1000),
    });
}

/// Debuglog - Kernel debuglog
///
/// ## SYNOPSIS
///
/// Debuglog objects allow userspace to read and write to kernel debug logs.
pub struct DebugLog {
    base: KObjectBase,
    flags: u32,
    read_offset: Mutex<usize>,
}

struct DlogBuffer {
    /// Append only buffer
    buf: Vec<u8>,
}

impl_kobject!(DebugLog);

impl DebugLog {
    /// Create a new `DebugLog`.
    pub fn create(flags: u32) -> Arc<Self> {
        Arc::new(DebugLog {
            base: KObjectBase::new(),
            flags,
            read_offset: Default::default(),
        })
    }

    /// Read a log, return the actual read size.
    pub fn read(&self, buf: &mut [u8]) -> usize {
        let mut offset = self.read_offset.lock();
        let len = DLOG.lock().read_at(*offset, buf);
        *offset += len;
        len
    }

    /// Write a log.
    pub fn write(&self, severity: Severity, flags: u32, tid: u64, pid: u64, data: &str) {
        DLOG.lock()
            .write(severity, flags | self.flags, tid, pid, data.as_bytes());
    }
}

#[repr(C)]
#[derive(Debug)]
struct DlogHeader {
    rollout: u32,
    datalen: u16,
    severity: Severity,
    flags: u8,
    timestamp: u64,
    pid: u64,
    tid: u64,
}

/// Log entry severity. Used for coarse filtering of log messages.
#[allow(missing_docs)]
#[repr(u8)]
#[derive(Debug)]
pub enum Severity {
    Trace = 0x10,
    Debug = 0x20,
    Info = 0x30,
    Warning = 0x40,
    Error = 0x50,
    Fatal = 0x60,
}

const HEADER_SIZE: usize = core::mem::size_of::<DlogHeader>();
/// Max length of Dlog read buffer.
pub const DLOG_MAX_LEN: usize = 256;

#[allow(unsafe_code)]
impl DlogBuffer {
    /// Read one record at offset.
    #[allow(clippy::cast_ptr_alignment)]
    fn read_at(&mut self, offset: usize, buf: &mut [u8]) -> usize {
        assert!(buf.len() >= DLOG_MAX_LEN);
        if offset == self.buf.len() {
            return 0;
        }
        let header_buf = &self.buf[offset..offset + HEADER_SIZE];
        buf[..HEADER_SIZE].copy_from_slice(header_buf);
        let header = unsafe { &*(header_buf.as_ptr() as *const DlogHeader) };
        let len = (header.rollout & 0xFFF) as usize;
        buf[HEADER_SIZE..len].copy_from_slice(&self.buf[offset + HEADER_SIZE..offset + len]);
        len
    }

    fn write(&mut self, severity: Severity, flags: u32, tid: u64, pid: u64, data: &[u8]) {
        let wire_size = HEADER_SIZE + align_up_4(data.len());
        let size = HEADER_SIZE + data.len();
        let header = DlogHeader {
            rollout: ((size as u32) << 12) | (wire_size as u32),
            datalen: data.len() as u16,
            severity,
            flags: flags as u8,
            timestamp: timer_now().as_nanos() as u64,
            pid,
            tid,
        };
        let header_buf: [u8; HEADER_SIZE] = unsafe { core::mem::transmute(header) };
        self.buf.extend(header_buf.iter());
        self.buf.extend(data);
        self.buf.extend(&[0u8; 4][..wire_size - size]);
    }
}

fn align_up_4(x: usize) -> usize {
    (x + 3) & !3
}
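An aside, not part of the diff: DlogBuffer pads each record to a 4-byte boundary and packs the padded wire size into the low 12 bits of rollout, with the unpadded size in the bits above, which is what read_at relies on. A small hypothetical test spelling that out (assuming it is compiled inside this module):

#[cfg(test)]
mod framing {
    use super::*;

    #[test]
    fn rollout_packing() {
        let data_len = 5usize; // e.g. b"hello"
        let wire_size = HEADER_SIZE + align_up_4(data_len); // padded payload
        let size = HEADER_SIZE + data_len; // unpadded record size
        let rollout = ((size as u32) << 12) | (wire_size as u32);
        assert_eq!(rollout & 0xFFF, wire_size as u32); // what read_at extracts
        assert_eq!(rollout >> 12, size as u32);
    }
}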
@@ -1,5 +0,0 @@
//! Objects for Device Drivers.

mod resource;

pub use self::resource::*;
@@ -1,96 +0,0 @@
use {crate::object::*, alloc::sync::Arc, bitflags::bitflags, numeric_enum_macro::numeric_enum};

numeric_enum! {
    #[repr(u32)]
    /// ResourceKind definition from fuchsia/zircon/system/public/zircon/syscalls/resource.h
    #[allow(missing_docs)]
    #[allow(clippy::upper_case_acronyms)]
    #[derive(Debug, Clone, Copy, Eq, PartialEq)]
    pub enum ResourceKind {
        MMIO = 0,
        IRQ = 1,
        IOPORT = 2,
        HYPERVISOR = 3,
        ROOT = 4,
        VMEX = 5,
        SMC = 6,
        COUNT = 7,
    }
}

bitflags! {
    /// Bits for Resource.flags.
    pub struct ResourceFlags: u32 {
        #[allow(clippy::identity_op)]
        /// Exclusive resource.
        const EXCLUSIVE = 1 << 16;
    }
}

/// Address space rights and accounting.
pub struct Resource {
    base: KObjectBase,
    kind: ResourceKind,
    addr: usize,
    len: usize,
    flags: ResourceFlags,
}

impl_kobject!(Resource);

impl Resource {
    /// Create a new `Resource`.
    pub fn create(
        name: &str,
        kind: ResourceKind,
        addr: usize,
        len: usize,
        flags: ResourceFlags,
    ) -> Arc<Self> {
        Arc::new(Resource {
            base: KObjectBase::with_name(name),
            kind,
            addr,
            len,
            flags,
        })
    }

    /// Validate the resource is the given kind or it is the root resource.
    pub fn validate(&self, kind: ResourceKind) -> ZxResult {
        if self.kind == kind || self.kind == ResourceKind::ROOT {
            Ok(())
        } else {
            Err(ZxError::WRONG_TYPE)
        }
    }

    /// Validate the resource is the given kind or it is the root resource,
    /// and [addr, addr+len] is within the range of the resource.
    pub fn validate_ranged_resource(
        &self,
        kind: ResourceKind,
        addr: usize,
        len: usize,
    ) -> ZxResult {
        self.validate(kind)?;
        if addr >= self.addr && (addr + len) <= (self.addr + self.len) {
            Ok(())
        } else {
            Err(ZxError::OUT_OF_RANGE)
        }
    }

    /// Returns `Err(ZxError::INVALID_ARGS)` if the resource is not the root resource, and
    /// either it's flags or parameter `flags` contains `ResourceFlags::EXCLUSIVE`.
    pub fn check_exclusive(&self, flags: ResourceFlags) -> ZxResult {
        if self.kind != ResourceKind::ROOT
            && (self.flags.contains(ResourceFlags::EXCLUSIVE)
                || flags.contains(ResourceFlags::EXCLUSIVE))
        {
            Err(ZxError::INVALID_ARGS)
        } else {
            Ok(())
        }
    }
}
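An aside, not part of the diff: a hypothetical caller combining the checks above when validating an MMIO claim against a resource handle:

fn check_mmio_claim(res: &Resource, addr: usize, len: usize) -> ZxResult {
    // Accept an MMIO resource covering [addr, addr + len], or the root resource.
    res.validate_ranged_resource(ResourceKind::MMIO, addr, len)?;
    // Only the root resource may be combined with the EXCLUSIVE flag.
    res.check_exclusive(ResourceFlags::EXCLUSIVE)
}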
@@ -1,244 +0,0 @@
// ANCHOR: result
///
pub type ZxResult<T = ()> = Result<T, ZxError>;
// ANCHOR_END: result

/// Zircon statuses are signed 32 bit integers. The space of values is
/// divided as follows:
/// - The zero value is for the OK status.
/// - Negative values are defined by the system, in this file.
/// - Positive values are reserved for protocol-specific error values,
/// and will never be defined by the system.
#[allow(non_camel_case_types, dead_code)]
#[repr(i32)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ZxError {
    OK = 0,

    // ======= Internal failures =======
    /// The system encountered an otherwise unspecified error
    /// while performing the operation.
    INTERNAL = -1,

    /// The operation is not implemented, supported,
    /// or enabled.
    NOT_SUPPORTED = -2,
    // ANCHOR_END: error_begin
    /// The system was not able to allocate some resource
    /// needed for the operation.
    NO_RESOURCES = -3,

    /// The system was not able to allocate memory needed
    /// for the operation.
    NO_MEMORY = -4,

    // -5 used to be ZX_ERR_CALL_FAILED.
    /// The system call was interrupted, but should be
    /// retried. This should not be seen outside of the VDSO.
    INTERNAL_INTR_RETRY = -6,

    // ======= Parameter errors =======
    /// an argument is invalid, ex. null pointer
    INVALID_ARGS = -10,

    /// A specified handle value does not refer to a handle.
    BAD_HANDLE = -11,

    /// The subject of the operation is the wrong type to
    /// perform the operation.
    /// Example: Attempting a message_read on a thread handle.
    WRONG_TYPE = -12,

    /// The specified syscall number is invalid.
    BAD_SYSCALL = -13,

    /// An argument is outside the valid range for this
    /// operation.
    OUT_OF_RANGE = -14,

    /// A caller provided buffer is too small for
    /// this operation.
    BUFFER_TOO_SMALL = -15,

    // ======= Precondition or state errors =======
    /// operation failed because the current state of the
    /// object does not allow it, or a precondition of the operation is
    /// not satisfied
    BAD_STATE = -20,

    /// The time limit for the operation elapsed before
    /// the operation completed.
    TIMED_OUT = -21,

    /// The operation cannot be performed currently but
    /// potentially could succeed if the caller waits for a prerequisite
    /// to be satisfied, for example waiting for a handle to be readable
    /// or writable.
    /// Example: Attempting to read from a channel that has no
    /// messages waiting but has an open remote will return ZX_ERR_SHOULD_WAIT.
    /// Attempting to read from a channel that has no messages waiting
    /// and has a closed remote end will return ZX_ERR_PEER_CLOSED.
    SHOULD_WAIT = -22,

    /// The in-progress operation (e.g. a wait) has been
    /// canceled.
    CANCELED = -23,

    /// The operation failed because the remote end of the
    /// subject of the operation was closed.
    PEER_CLOSED = -24,

    /// The requested entity is not found.
    NOT_FOUND = -25,

    /// An object with the specified identifier
    /// already exists.
    /// Example: Attempting to create a file when a file already exists
    /// with that name.
    ALREADY_EXISTS = -26,

    /// The operation failed because the named entity
    /// is already owned or controlled by another entity. The operation
    /// could succeed later if the current owner releases the entity.
    ALREADY_BOUND = -27,

    /// The subject of the operation is currently unable
    /// to perform the operation.
    /// Note: This is used when there's no direct way for the caller to
    /// observe when the subject will be able to perform the operation
    /// and should thus retry.
    UNAVAILABLE = -28,

    // ======= Permission check errors =======
    /// The caller did not have permission to perform
    /// the specified operation.
    ACCESS_DENIED = -30,

    // ======= Input-output errors =======
    /// Otherwise unspecified error occurred during I/O.
    IO = -40,

    /// The entity the I/O operation is being performed on
    /// rejected the operation.
    /// Example: an I2C device NAK'ing a transaction or a disk controller
    /// rejecting an invalid command, or a stalled USB endpoint.
    IO_REFUSED = -41,

    /// The data in the operation failed an integrity
    /// check and is possibly corrupted.
    /// Example: CRC or Parity error.
    IO_DATA_INTEGRITY = -42,

    /// The data in the operation is currently unavailable
    /// and may be permanently lost.
    /// Example: A disk block is irrecoverably damaged.
    IO_DATA_LOSS = -43,

    /// The device is no longer available (has been
    /// unplugged from the system, powered down, or the driver has been
    /// unloaded,
    IO_NOT_PRESENT = -44,

    /// More data was received from the device than expected.
    /// Example: a USB "babble" error due to a device sending more data than
    /// the host queued to receive.
    IO_OVERRUN = -45,

    /// An operation did not complete within the required timeframe.
    /// Example: A USB isochronous transfer that failed to complete due to an overrun or underrun.
    IO_MISSED_DEADLINE = -46,

    /// The data in the operation is invalid parameter or is out of range.
    /// Example: A USB transfer that failed to complete with TRB Error
    IO_INVALID = -47,

    // ======== Filesystem Errors ========
    /// Path name is too long.
    BAD_PATH = -50,

    /// Object is not a directory or does not support
    /// directory operations.
    /// Example: Attempted to open a file as a directory or
    /// attempted to do directory operations on a file.
    NOT_DIR = -51,

    /// Object is not a regular file.
    NOT_FILE = -52,

    /// This operation would cause a file to exceed a
    /// filesystem-specific size limit
    FILE_BIG = -53,

    /// Filesystem or device space is exhausted.
    NO_SPACE = -54,

    /// Directory is not empty.
    NOT_EMPTY = -55,

    // ======== Flow Control ========
    // These are not errors, as such, and will never be returned
    // by a syscall or public API. They exist to allow callbacks
    // to request changes in operation.
    /// Do not call again.
    /// Example: A notification callback will be called on every
    /// event until it returns something other than ZX_OK.
    /// This status allows differentiation between "stop due to
    /// an error" and "stop because the work is done."
    STOP = -60,

    /// Advance to the next item.
    /// Example: A notification callback will use this response
    /// to indicate it did not "consume" an item passed to it,
    /// but by choice, not due to an error condition.
    NEXT = -61,

    /// Ownership of the item has moved to an asynchronous worker.
    ///
    /// Unlike ZX_ERR_STOP, which implies that iteration on an object
    /// should stop, and ZX_ERR_NEXT, which implies that iteration
    /// should continue to the next item, ZX_ERR_ASYNC implies
    /// that an asynchronous worker is responsible for continuing iteration.
    ///
    /// Example: A notification callback will be called on every
|
||||
/// event, but one event needs to handle some work asynchronously
|
||||
/// before it can continue. ZX_ERR_ASYNC implies the worker is
|
||||
/// responsible for resuming iteration once its work has completed.
|
||||
ASYNC = -62,
|
||||
|
||||
// ======== Network-related errors ========
|
||||
/// Specified protocol is not
|
||||
/// supported.
|
||||
PROTOCOL_NOT_SUPPORTED = -70,
|
||||
|
||||
/// Host is unreachable.
|
||||
ADDRESS_UNREACHABLE = -71,
|
||||
|
||||
/// Address is being used by someone else.
|
||||
ADDRESS_IN_USE = -72,
|
||||
|
||||
/// Socket is not connected.
|
||||
NOT_CONNECTED = -73,
|
||||
|
||||
/// Remote peer rejected the connection.
|
||||
CONNECTION_REFUSED = -74,
|
||||
|
||||
/// Connection was reset.
|
||||
CONNECTION_RESET = -75,
|
||||
// ANCHOR: error_end
|
||||
/// Connection was aborted.
|
||||
CONNECTION_ABORTED = -76,
|
||||
}
|
||||
|
||||
use kernel_hal::user::Error;
|
||||
|
||||
impl From<Error> for ZxError {
|
||||
fn from(e: Error) -> Self {
|
||||
match e {
|
||||
Error::InvalidUtf8 => ZxError::INVALID_ARGS,
|
||||
Error::InvalidPointer => ZxError::INVALID_ARGS,
|
||||
Error::BufferTooSmall => ZxError::BUFFER_TOO_SMALL,
|
||||
Error::InvalidLength => ZxError::INVALID_ARGS,
|
||||
Error::InvalidVectorAddress => ZxError::NOT_FOUND,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1,193 +0,0 @@
|
||||
use {
|
||||
super::*,
|
||||
crate::error::*,
|
||||
crate::object::*,
|
||||
alloc::collections::VecDeque,
|
||||
alloc::sync::{Arc, Weak},
|
||||
alloc::vec::Vec,
|
||||
core::convert::TryInto,
|
||||
core::sync::atomic::{AtomicU32, Ordering},
|
||||
spin::Mutex,
|
||||
};
|
||||
|
||||
pub struct Channel {
|
||||
base: KObjectBase,
|
||||
peer: Weak<Channel>,
|
||||
recv_queue: Mutex<VecDeque<T>>,
|
||||
next_txid: AtomicU32,
|
||||
}
|
||||
|
||||
type T = MessagePacket;
|
||||
type TxID = u32;
|
||||
|
||||
impl_kobject!(Channel
|
||||
fn peer(&self) -> ZxResult<Arc<dyn KernelObject>> {
|
||||
let peer = self.peer.upgrade().ok_or(ZxError::PEER_CLOSED)?;
|
||||
Ok(peer)
|
||||
}
|
||||
fn related_koid(&self) -> KoID {
|
||||
self.peer.upgrade().map(|p| p.id()).unwrap_or(0)
|
||||
}
|
||||
);
|
||||
|
||||
impl Channel {
|
||||
/// Create a channel and return a pair of its endpoints
|
||||
#[allow(unsafe_code)]
|
||||
pub fn create() -> (Arc<Self>, Arc<Self>) {
|
||||
let mut channel0 = Arc::new(Channel {
|
||||
base: KObjectBase::default(),
|
||||
peer: Weak::default(),
|
||||
recv_queue: Default::default(),
|
||||
next_txid: AtomicU32::new(0x8000_0000),
|
||||
});
|
||||
let channel1 = Arc::new(Channel {
|
||||
base: KObjectBase::default(),
|
||||
peer: Arc::downgrade(&channel0),
|
||||
recv_queue: Default::default(),
|
||||
next_txid: AtomicU32::new(0x8000_0000),
|
||||
});
|
||||
// no other reference of `channel0`
|
||||
unsafe {
|
||||
Arc::get_mut_unchecked(&mut channel0).peer = Arc::downgrade(&channel1);
|
||||
}
|
||||
(channel0, channel1)
|
||||
}
|
||||
|
||||
/// Read a packet from the channel if check is ok, otherwise the msg will keep.
|
||||
pub fn check_and_read(&self, checker: impl FnOnce(&T) -> ZxResult) -> ZxResult<T> {
|
||||
let mut recv_queue = self.recv_queue.lock();
|
||||
if let Some(msg) = recv_queue.front() {
|
||||
checker(msg)?;
|
||||
let msg = recv_queue.pop_front().unwrap();
|
||||
return Ok(msg);
|
||||
}
|
||||
if self.peer_closed() {
|
||||
Err(ZxError::PEER_CLOSED)
|
||||
} else {
|
||||
Err(ZxError::SHOULD_WAIT)
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a packet from the channel if check is ok, otherwise the msg will keep.
|
||||
pub fn read(&self) -> ZxResult<T> {
|
||||
self.check_and_read(|_| Ok(()))
|
||||
}
|
||||
|
||||
/// Write a packet to the channel
|
||||
pub fn write(&self, msg: T) -> ZxResult {
|
||||
let peer = self.peer.upgrade().ok_or(ZxError::PEER_CLOSED)?;
|
||||
peer.push_general(msg);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Push a message to general queue, called from peer.
|
||||
fn push_general(&self, msg: T) {
|
||||
let mut send_queue = self.recv_queue.lock();
|
||||
send_queue.push_back(msg);
|
||||
}
|
||||
|
||||
/// Generate a new transaction ID for `call`.
|
||||
fn new_txid(&self) -> TxID {
|
||||
self.next_txid.fetch_add(1, Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Is peer channel closed?
|
||||
fn peer_closed(&self) -> bool {
|
||||
self.peer.strong_count() == 0
|
||||
}
|
||||
}
|
||||
|
||||
/// The message transferred in the channel.
|
||||
/// See [Channel](struct.Channel.html) for details.
|
||||
#[derive(Default)]
|
||||
pub struct MessagePacket {
|
||||
/// The data carried by the message packet
|
||||
pub data: Vec<u8>,
|
||||
/// See [Channel](struct.Channel.html) for details.
|
||||
pub handles: Vec<Handle>,
|
||||
}
|
||||
|
||||
impl MessagePacket {
|
||||
/// Set txid (the first 4 bytes)
|
||||
pub fn set_txid(&mut self, txid: TxID) {
|
||||
if self.data.len() >= core::mem::size_of::<TxID>() {
|
||||
self.data[..4].copy_from_slice(&txid.to_ne_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
/// Get txid (the first 4 bytes)
|
||||
pub fn get_txid(&self) -> TxID {
|
||||
if self.data.len() >= core::mem::size_of::<TxID>() {
|
||||
TxID::from_ne_bytes(self.data[..4].try_into().unwrap())
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_basics() {
|
||||
let (end0, end1) = Channel::create();
|
||||
assert!(Arc::ptr_eq(
|
||||
&end0.peer().unwrap().downcast_arc().unwrap(),
|
||||
&end1
|
||||
));
|
||||
assert_eq!(end0.related_koid(), end1.id());
|
||||
|
||||
drop(end1);
|
||||
assert_eq!(end0.peer().unwrap_err(), ZxError::PEER_CLOSED);
|
||||
assert_eq!(end0.related_koid(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_write() {
|
||||
let (channel0, channel1) = Channel::create();
|
||||
// write a message to each other
|
||||
channel0
|
||||
.write(MessagePacket {
|
||||
data: Vec::from("hello 1"),
|
||||
handles: Vec::new(),
|
||||
})
|
||||
.unwrap();
|
||||
channel1
|
||||
.write(MessagePacket {
|
||||
data: Vec::from("hello 0"),
|
||||
handles: Vec::new(),
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// read message should success
|
||||
let recv_msg = channel1.read().unwrap();
|
||||
assert_eq!(recv_msg.data.as_slice(), b"hello 1");
|
||||
assert!(recv_msg.handles.is_empty());
|
||||
|
||||
let recv_msg = channel0.read().unwrap();
|
||||
assert_eq!(recv_msg.data.as_slice(), b"hello 0");
|
||||
assert!(recv_msg.handles.is_empty());
|
||||
|
||||
// read more message should fail.
|
||||
assert_eq!(channel0.read().err(), Some(ZxError::SHOULD_WAIT));
|
||||
assert_eq!(channel1.read().err(), Some(ZxError::SHOULD_WAIT));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn peer_closed() {
|
||||
let (channel0, channel1) = Channel::create();
|
||||
// write a message from peer, then drop it
|
||||
channel1.write(MessagePacket::default()).unwrap();
|
||||
drop(channel1);
|
||||
// read the first message should success.
|
||||
channel0.read().unwrap();
|
||||
// read more message should fail.
|
||||
assert_eq!(channel0.read().err(), Some(ZxError::PEER_CLOSED));
|
||||
// write message should fail.
|
||||
assert_eq!(
|
||||
channel0.write(MessagePacket::default()),
|
||||
Err(ZxError::PEER_CLOSED)
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1,4 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
mod channel;
|
||||
pub use self::channel::*;
|
||||
@ -1,25 +0,0 @@
|
||||
#![no_std]
|
||||
#![deny(unused_imports)]
|
||||
#![allow(dead_code)]
|
||||
#![feature(get_mut_unchecked)]
|
||||
#![feature(drain_filter)]
|
||||
|
||||
extern crate alloc;
|
||||
|
||||
#[cfg(test)]
|
||||
#[macro_use]
|
||||
extern crate std;
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
|
||||
pub mod debuglog;
|
||||
pub mod dev;
|
||||
pub mod error;
|
||||
pub mod ipc;
|
||||
pub mod object;
|
||||
pub mod task;
|
||||
pub mod util;
|
||||
pub mod vm;
|
||||
|
||||
pub use self::error::*;
|
||||
@ -1,21 +0,0 @@
|
||||
// ANCHOR: handle
|
||||
use super::{KernelObject, Rights};
|
||||
use alloc::sync::Arc;
|
||||
|
||||
pub type HandleValue = u32;
|
||||
pub const INVALID_HANDLE: HandleValue = 0;
|
||||
|
||||
/// 内核对象句柄
|
||||
#[derive(Clone)]
|
||||
pub struct Handle {
|
||||
pub object: Arc<dyn KernelObject>,
|
||||
pub rights: Rights,
|
||||
}
|
||||
|
||||
impl Handle {
|
||||
/// 创建一个新句柄
|
||||
pub fn new(object: Arc<dyn KernelObject>, rights: Rights) -> Self {
|
||||
Handle { object, rights }
|
||||
}
|
||||
}
|
||||
// ANCHOR_END: handle
|
||||
@ -1,409 +0,0 @@
|
||||
use {
|
||||
super::job_policy::*,
|
||||
super::process::Process,
|
||||
super::*,
|
||||
crate::error::*,
|
||||
crate::object::*,
|
||||
crate::task::Task,
|
||||
alloc::sync::{Arc, Weak},
|
||||
alloc::vec::Vec,
|
||||
spin::Mutex,
|
||||
};
|
||||
|
||||
/// Job 对象
|
||||
#[allow(dead_code)]
|
||||
pub struct Job {
|
||||
base: KObjectBase,
|
||||
parent: Option<Arc<Job>>,
|
||||
parent_policy: JobPolicy,
|
||||
inner: Mutex<JobInner>,
|
||||
}
|
||||
|
||||
impl_kobject!(Job
|
||||
fn get_child(&self, id: KoID) -> ZxResult<Arc<dyn KernelObject>> {
|
||||
let inner = self.inner.lock();
|
||||
if let Some(job) = inner.children.iter().filter_map(|o|o.upgrade()).find(|o| o.id() == id) {
|
||||
return Ok(job);
|
||||
}
|
||||
if let Some(proc) = inner.processes.iter().find(|o| o.id() == id) {
|
||||
return Ok(proc.clone());
|
||||
}
|
||||
Err(ZxError::NOT_FOUND)
|
||||
}
|
||||
fn related_koid(&self) -> KoID {
|
||||
self.parent.as_ref().map(|p| p.id()).unwrap_or(0)
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Default)]
|
||||
struct JobInner {
|
||||
policy: JobPolicy,
|
||||
children: Vec<Weak<Job>>,
|
||||
processes: Vec<Arc<Process>>,
|
||||
// if the job is killed, no more child creation should works
|
||||
killed: bool,
|
||||
self_ref: Weak<Job>,
|
||||
}
|
||||
|
||||
impl Job {
|
||||
/// Create the root job.
|
||||
pub fn root() -> Arc<Self> {
|
||||
let job = Arc::new(Job {
|
||||
base: KObjectBase::new(),
|
||||
parent: None,
|
||||
parent_policy: JobPolicy::default(),
|
||||
inner: Mutex::new(JobInner::default()),
|
||||
});
|
||||
job.inner.lock().self_ref = Arc::downgrade(&job);
|
||||
job
|
||||
}
|
||||
|
||||
/// Create a new child job object.
|
||||
pub fn create_child(self: &Arc<Self>) -> ZxResult<Arc<Self>> {
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.killed {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
let child = Arc::new(Job {
|
||||
base: KObjectBase::new(),
|
||||
parent: Some(self.clone()),
|
||||
parent_policy: inner.policy.merge(&self.parent_policy),
|
||||
inner: Mutex::new(JobInner::default()),
|
||||
});
|
||||
let child_weak = Arc::downgrade(&child);
|
||||
child.inner.lock().self_ref = child_weak.clone();
|
||||
inner.children.push(child_weak);
|
||||
Ok(child)
|
||||
}
|
||||
|
||||
fn remove_child(&self, to_remove: &Weak<Job>) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.children.retain(|child| !to_remove.ptr_eq(child));
|
||||
if inner.killed && inner.processes.is_empty() && inner.children.is_empty() {
|
||||
drop(inner);
|
||||
self.terminate()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the policy of the job.
|
||||
pub fn policy(&self) -> JobPolicy {
|
||||
self.inner.lock().policy.merge(&self.parent_policy)
|
||||
}
|
||||
|
||||
/// Get the parent job.
|
||||
pub fn parent(&self) -> Option<Arc<Self>> {
|
||||
self.parent.clone()
|
||||
}
|
||||
|
||||
/// Sets one or more security and/or resource policies to an empty job.
|
||||
///
|
||||
/// The job's effective policies is the combination of the parent's
|
||||
/// effective policies and the policies specified in policy.
|
||||
///
|
||||
/// After this call succeeds any new child process or child job will have
|
||||
/// the new effective policy applied to it.
|
||||
pub fn set_policy_basic(
|
||||
&self,
|
||||
options: SetPolicyOptions,
|
||||
policies: &[BasicPolicy],
|
||||
) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
if !inner.is_empty() {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
for policy in policies {
|
||||
if self.parent_policy.get_action(policy.condition).is_some() {
|
||||
match options {
|
||||
SetPolicyOptions::Absolute => return Err(ZxError::ALREADY_EXISTS),
|
||||
SetPolicyOptions::Relative => {}
|
||||
}
|
||||
} else {
|
||||
inner.policy.apply(*policy);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Add a process to the job.
|
||||
pub(super) fn add_process(&self, process: Arc<Process>) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.killed {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
inner.processes.push(process);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove a process from the job.
|
||||
pub(super) fn remove_process(&self, id: KoID) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.processes.retain(|proc| proc.id() != id);
|
||||
if inner.killed && inner.processes.is_empty() && inner.children.is_empty() {
|
||||
drop(inner);
|
||||
self.terminate()
|
||||
}
|
||||
}
|
||||
|
||||
/// Check whether this job is root job.
|
||||
pub fn check_root_job(&self) -> ZxResult {
|
||||
if self.parent.is_some() {
|
||||
Err(ZxError::ACCESS_DENIED)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Get KoIDs of Processes.
|
||||
pub fn process_ids(&self) -> Vec<KoID> {
|
||||
self.inner.lock().processes.iter().map(|p| p.id()).collect()
|
||||
}
|
||||
|
||||
/// Get KoIDs of children Jobs.
|
||||
pub fn children_ids(&self) -> Vec<KoID> {
|
||||
self.inner
|
||||
.lock()
|
||||
.children
|
||||
.iter()
|
||||
.filter_map(|j| j.upgrade())
|
||||
.map(|j| j.id())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Return true if this job has no processes and no child jobs.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.inner.lock().is_empty()
|
||||
}
|
||||
|
||||
/// The job finally terminates.
|
||||
fn terminate(&self) {
|
||||
if let Some(parent) = self.parent.as_ref() {
|
||||
parent.remove_child(&self.inner.lock().self_ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Task for Job {
|
||||
/// Kill the job. The job do not terminate immediately when killed.
|
||||
/// It will terminate after all its children and processes are terminated.
|
||||
fn kill(&self) {
|
||||
let (children, processes) = {
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.killed {
|
||||
return;
|
||||
}
|
||||
inner.killed = true;
|
||||
(inner.children.clone(), inner.processes.clone())
|
||||
};
|
||||
if children.is_empty() && processes.is_empty() {
|
||||
self.terminate();
|
||||
return;
|
||||
}
|
||||
for child in children {
|
||||
if let Some(child) = child.upgrade() {
|
||||
child.kill();
|
||||
}
|
||||
}
|
||||
for proc in processes {
|
||||
proc.kill();
|
||||
}
|
||||
}
|
||||
|
||||
fn suspend(&self) {
|
||||
panic!("job do not support suspend");
|
||||
}
|
||||
|
||||
fn resume(&self) {
|
||||
panic!("job do not support resume");
|
||||
}
|
||||
}
|
||||
|
||||
impl JobInner {
|
||||
fn is_empty(&self) -> bool {
|
||||
self.processes.is_empty() && self.children.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Job {
|
||||
fn drop(&mut self) {
|
||||
self.terminate();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::task::TASK_RETCODE_SYSCALL_KILL;
|
||||
|
||||
#[test]
|
||||
fn create() {
|
||||
let root_job = Job::root();
|
||||
let job = Job::create_child(&root_job).expect("failed to create job");
|
||||
|
||||
let child = root_job
|
||||
.get_child(job.id())
|
||||
.unwrap()
|
||||
.downcast_arc()
|
||||
.unwrap();
|
||||
assert!(Arc::ptr_eq(&child, &job));
|
||||
assert_eq!(job.related_koid(), root_job.id());
|
||||
assert_eq!(root_job.related_koid(), 0);
|
||||
|
||||
root_job.kill();
|
||||
assert_eq!(root_job.create_child().err(), Some(ZxError::BAD_STATE));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn set_policy() {
|
||||
let root_job = Job::root();
|
||||
|
||||
// default policy
|
||||
assert_eq!(
|
||||
root_job.policy().get_action(PolicyCondition::BadHandle),
|
||||
None
|
||||
);
|
||||
|
||||
// set policy for root job
|
||||
let policy = &[BasicPolicy {
|
||||
condition: PolicyCondition::BadHandle,
|
||||
action: PolicyAction::Deny,
|
||||
}];
|
||||
root_job
|
||||
.set_policy_basic(SetPolicyOptions::Relative, policy)
|
||||
.expect("failed to set policy");
|
||||
assert_eq!(
|
||||
root_job.policy().get_action(PolicyCondition::BadHandle),
|
||||
Some(PolicyAction::Deny)
|
||||
);
|
||||
|
||||
// override policy should success
|
||||
let policy = &[BasicPolicy {
|
||||
condition: PolicyCondition::BadHandle,
|
||||
action: PolicyAction::Allow,
|
||||
}];
|
||||
root_job
|
||||
.set_policy_basic(SetPolicyOptions::Relative, policy)
|
||||
.expect("failed to set policy");
|
||||
assert_eq!(
|
||||
root_job.policy().get_action(PolicyCondition::BadHandle),
|
||||
Some(PolicyAction::Allow)
|
||||
);
|
||||
|
||||
// create a child job
|
||||
let job = Job::create_child(&root_job).expect("failed to create job");
|
||||
|
||||
// should inherit parent's policy.
|
||||
assert_eq!(
|
||||
job.policy().get_action(PolicyCondition::BadHandle),
|
||||
Some(PolicyAction::Allow)
|
||||
);
|
||||
|
||||
// setting policy for a non-empty job should fail.
|
||||
assert_eq!(
|
||||
root_job.set_policy_basic(SetPolicyOptions::Relative, &[]),
|
||||
Err(ZxError::BAD_STATE)
|
||||
);
|
||||
|
||||
// set new policy should success.
|
||||
let policy = &[BasicPolicy {
|
||||
condition: PolicyCondition::WrongObject,
|
||||
action: PolicyAction::Allow,
|
||||
}];
|
||||
job.set_policy_basic(SetPolicyOptions::Relative, policy)
|
||||
.expect("failed to set policy");
|
||||
assert_eq!(
|
||||
job.policy().get_action(PolicyCondition::WrongObject),
|
||||
Some(PolicyAction::Allow)
|
||||
);
|
||||
|
||||
// relatively setting existing policy should be ignored.
|
||||
let policy = &[BasicPolicy {
|
||||
condition: PolicyCondition::BadHandle,
|
||||
action: PolicyAction::Deny,
|
||||
}];
|
||||
job.set_policy_basic(SetPolicyOptions::Relative, policy)
|
||||
.expect("failed to set policy");
|
||||
assert_eq!(
|
||||
job.policy().get_action(PolicyCondition::BadHandle),
|
||||
Some(PolicyAction::Allow)
|
||||
);
|
||||
|
||||
// absolutely setting existing policy should fail.
|
||||
assert_eq!(
|
||||
job.set_policy_basic(SetPolicyOptions::Absolute, policy),
|
||||
Err(ZxError::ALREADY_EXISTS)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parent_child() {
|
||||
let root_job = Job::root();
|
||||
let job = Job::create_child(&root_job).expect("failed to create job");
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
|
||||
assert_eq!(root_job.get_child(job.id()).unwrap().id(), job.id());
|
||||
assert_eq!(root_job.get_child(proc.id()).unwrap().id(), proc.id());
|
||||
assert_eq!(
|
||||
root_job.get_child(root_job.id()).err(),
|
||||
Some(ZxError::NOT_FOUND)
|
||||
);
|
||||
assert!(Arc::ptr_eq(&job.parent().unwrap(), &root_job));
|
||||
|
||||
let job1 = root_job.create_child().expect("failed to create job");
|
||||
let proc1 = Process::create(&root_job, "proc1").expect("failed to create process");
|
||||
assert_eq!(root_job.children_ids(), vec![job.id(), job1.id()]);
|
||||
assert_eq!(root_job.process_ids(), vec![proc.id(), proc1.id()]);
|
||||
|
||||
root_job.kill();
|
||||
assert_eq!(root_job.create_child().err(), Some(ZxError::BAD_STATE));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check() {
|
||||
let root_job = Job::root();
|
||||
assert!(root_job.is_empty());
|
||||
let job = root_job.create_child().expect("failed to create job");
|
||||
assert_eq!(root_job.check_root_job(), Ok(()));
|
||||
assert_eq!(job.check_root_job(), Err(ZxError::ACCESS_DENIED));
|
||||
|
||||
assert!(!root_job.is_empty());
|
||||
assert!(job.is_empty());
|
||||
|
||||
let _proc = Process::create(&job, "proc").expect("failed to create process");
|
||||
assert!(!job.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn kill() {
|
||||
let root_job = Job::root();
|
||||
let job = Job::create_child(&root_job).expect("failed to create job");
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
let current_thread = CurrentThread(thread.clone());
|
||||
|
||||
root_job.kill();
|
||||
assert!(root_job.inner.lock().killed);
|
||||
assert!(job.inner.lock().killed);
|
||||
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
|
||||
assert_eq!(thread.state(), ThreadState::Dying);
|
||||
|
||||
std::mem::drop(current_thread);
|
||||
assert!(root_job.inner.lock().killed);
|
||||
assert!(job.inner.lock().killed);
|
||||
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
|
||||
assert_eq!(thread.state(), ThreadState::Dead);
|
||||
|
||||
// The job has no children.
|
||||
let root_job = Job::root();
|
||||
root_job.kill();
|
||||
assert!(root_job.inner.lock().killed);
|
||||
|
||||
// The job's process have no threads.
|
||||
let root_job = Job::root();
|
||||
let job = Job::create_child(&root_job).expect("failed to create job");
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
root_job.kill();
|
||||
assert!(root_job.inner.lock().killed);
|
||||
assert!(job.inner.lock().killed);
|
||||
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
|
||||
}
|
||||
}
|
||||
@ -1,108 +0,0 @@
|
||||
/// Security and resource policies of a job.
|
||||
#[derive(Default, Copy, Clone)]
|
||||
pub struct JobPolicy {
|
||||
// TODO: use bitset
|
||||
action: [Option<PolicyAction>; 15],
|
||||
}
|
||||
|
||||
impl JobPolicy {
|
||||
/// Get the action of a policy `condition`.
|
||||
pub fn get_action(&self, condition: PolicyCondition) -> Option<PolicyAction> {
|
||||
self.action[condition as usize]
|
||||
}
|
||||
|
||||
/// Apply a basic policy.
|
||||
pub fn apply(&mut self, policy: BasicPolicy) {
|
||||
self.action[policy.condition as usize] = Some(policy.action);
|
||||
}
|
||||
|
||||
/// Merge the policy with `parent`'s.
|
||||
pub fn merge(&self, parent: &Self) -> Self {
|
||||
let mut new = *self;
|
||||
for i in 0..15 {
|
||||
if parent.action[i].is_some() {
|
||||
new.action[i] = parent.action[i];
|
||||
}
|
||||
}
|
||||
new
|
||||
}
|
||||
}
|
||||
|
||||
/// Control the effect in the case of conflict between
|
||||
/// the existing policies and the new policies when setting new policies.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum SetPolicyOptions {
|
||||
/// Policy is applied for all conditions in policy or the call fails.
|
||||
Absolute,
|
||||
/// Policy is applied for the conditions not specifically overridden by the parent policy.
|
||||
Relative,
|
||||
}
|
||||
|
||||
/// The policy type.
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct BasicPolicy {
|
||||
/// Condition when the policy is applied.
|
||||
pub condition: PolicyCondition,
|
||||
///
|
||||
pub action: PolicyAction,
|
||||
}
|
||||
|
||||
/// The condition when a policy is applied.
|
||||
#[repr(u32)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum PolicyCondition {
|
||||
/// A process under this job is attempting to issue a syscall with an invalid handle.
|
||||
/// In this case, `PolicyAction::Allow` and `PolicyAction::Deny` are equivalent:
|
||||
/// if the syscall returns, it will always return the error ZX_ERR_BAD_HANDLE.
|
||||
BadHandle = 0,
|
||||
/// A process under this job is attempting to issue a syscall with a handle that does not support such operation.
|
||||
WrongObject = 1,
|
||||
/// A process under this job is attempting to map an address region with write-execute access.
|
||||
VmarWx = 2,
|
||||
/// A special condition that stands for all of the above ZX_NEW conditions
|
||||
/// such as NEW_VMO, NEW_CHANNEL, NEW_EVENT, NEW_EVENTPAIR, NEW_PORT, NEW_SOCKET, NEW_FIFO,
|
||||
/// And any future ZX_NEW policy.
|
||||
/// This will include any new kernel objects which do not require a parent object for creation.
|
||||
NewAny = 3,
|
||||
/// A process under this job is attempting to create a new vm object.
|
||||
NewVMO = 4,
|
||||
/// A process under this job is attempting to create a new channel.
|
||||
NewChannel = 5,
|
||||
/// A process under this job is attempting to create a new event.
|
||||
NewEvent = 6,
|
||||
/// A process under this job is attempting to create a new event pair.
|
||||
NewEventPair = 7,
|
||||
/// A process under this job is attempting to create a new port.
|
||||
NewPort = 8,
|
||||
/// A process under this job is attempting to create a new socket.
|
||||
NewSocket = 9,
|
||||
/// A process under this job is attempting to create a new fifo.
|
||||
NewFIFO = 10,
|
||||
/// A process under this job is attempting to create a new timer.
|
||||
NewTimer = 11,
|
||||
/// A process under this job is attempting to create a new process.
|
||||
NewProcess = 12,
|
||||
/// A process under this job is attempting to create a new profile.
|
||||
NewProfile = 13,
|
||||
/// A process under this job is attempting to use zx_vmo_replace_as_executable()
|
||||
/// with a ZX_HANDLE_INVALID as the second argument rather than a valid ZX_RSRC_KIND_VMEX.
|
||||
AmbientMarkVMOExec = 14,
|
||||
}
|
||||
|
||||
/// The action taken when the condition happens specified by a policy.
|
||||
#[repr(u32)]
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub enum PolicyAction {
|
||||
/// Allow condition.
|
||||
Allow = 0,
|
||||
/// Prevent condition.
|
||||
Deny = 1,
|
||||
/// Generate an exception via the debug port. An exception generated this
|
||||
/// way acts as a breakpoint. The thread may be resumed after the exception.
|
||||
AllowException = 2,
|
||||
/// Just like `AllowException`, but after resuming condition is denied.
|
||||
DenyException = 3,
|
||||
/// Terminate the process.
|
||||
Kill = 4,
|
||||
}
|
||||
@ -1,24 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
mod job;
|
||||
mod job_policy;
|
||||
mod process;
|
||||
mod thread;
|
||||
|
||||
pub use {self::job::*, self::job_policy::*, self::process::*, self::thread::*};
|
||||
|
||||
/// Task (Thread, Process, or Job)
|
||||
pub trait Task: Sync + Send {
|
||||
/// Kill the task. The task do not terminate immediately when killed.
|
||||
/// It will terminate after all its children are terminated or some cleanups are finished.
|
||||
fn kill(&self);
|
||||
|
||||
/// Suspend the task. Currently only thread or process handles may be suspended.
|
||||
fn suspend(&self);
|
||||
|
||||
/// Resume the task
|
||||
fn resume(&self);
|
||||
}
|
||||
|
||||
/// The return code set when a task is killed via zx_task_kill().
|
||||
pub const TASK_RETCODE_SYSCALL_KILL: i64 = -1028;
|
||||
@ -1,534 +0,0 @@
|
||||
use {
|
||||
super::{job::Job, job_policy::*, thread::*, *},
|
||||
crate::{error::*, object::*, vm::*},
|
||||
alloc::{sync::Arc, vec::Vec},
|
||||
core::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
},
|
||||
hashbrown::HashMap,
|
||||
spin::Mutex,
|
||||
};
|
||||
|
||||
pub struct Process {
|
||||
base: KObjectBase,
|
||||
job: Arc<Job>,
|
||||
policy: JobPolicy,
|
||||
vmar: Arc<VmAddressRegion>,
|
||||
inner: Mutex<ProcessInner>,
|
||||
}
|
||||
|
||||
impl_kobject!(Process
|
||||
fn get_child(&self, id: KoID) -> ZxResult<Arc<dyn KernelObject>> {
|
||||
let inner = self.inner.lock();
|
||||
let thread = inner.threads.iter().find(|o| o.id() == id).ok_or(ZxError::NOT_FOUND)?;
|
||||
Ok(thread.clone())
|
||||
}
|
||||
fn related_koid(&self) -> KoID {
|
||||
self.job.id()
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Default)]
|
||||
struct ProcessInner {
|
||||
max_handle_id: u32,
|
||||
status: Status,
|
||||
handles: HashMap<HandleValue, Handle>,
|
||||
threads: Vec<Arc<Thread>>,
|
||||
}
|
||||
|
||||
/// Status of a process.
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub enum Status {
|
||||
/// Initial state, no thread present in process.
|
||||
Init,
|
||||
/// First thread has started and is running.
|
||||
Running,
|
||||
/// Process has exited with the code.
|
||||
Exited(i64),
|
||||
}
|
||||
|
||||
impl Default for Status {
|
||||
fn default() -> Self {
|
||||
Status::Init
|
||||
}
|
||||
}
|
||||
|
||||
impl Process {
|
||||
/// Create a new process in the `job`.
|
||||
pub fn create(job: &Arc<Job>, name: &str) -> ZxResult<Arc<Self>> {
|
||||
let proc = Arc::new(Process {
|
||||
base: KObjectBase::with_name(name),
|
||||
job: job.clone(),
|
||||
policy: job.policy(),
|
||||
vmar: VmAddressRegion::new_root(),
|
||||
inner: Mutex::new(ProcessInner::default()),
|
||||
});
|
||||
job.add_process(proc.clone())?;
|
||||
Ok(proc)
|
||||
}
|
||||
|
||||
/// Get a handle from the process
|
||||
fn get_handle(&self, handle_value: HandleValue) -> ZxResult<Handle> {
|
||||
self.inner.lock().get_handle(handle_value)
|
||||
}
|
||||
|
||||
/// 添加一个新的对象句柄
|
||||
pub fn add_handle(&self, handle: Handle) -> HandleValue {
|
||||
self.inner.lock().add_handle(handle)
|
||||
}
|
||||
|
||||
/// 删除一个对象句柄
|
||||
pub fn remove_handle(&self, handle_value: HandleValue) -> ZxResult<Handle> {
|
||||
self.inner.lock().remove_handle(handle_value)
|
||||
}
|
||||
|
||||
/// Add all handles to the process
|
||||
pub fn add_handles(&self, handles: Vec<Handle>) -> Vec<HandleValue> {
|
||||
let mut inner = self.inner.lock();
|
||||
handles.into_iter().map(|h| inner.add_handle(h)).collect()
|
||||
}
|
||||
|
||||
/// Remove all handles from the process.
|
||||
pub fn remove_handles(&self, handle_values: &[HandleValue]) -> ZxResult<Vec<Handle>> {
|
||||
let mut inner = self.inner.lock();
|
||||
handle_values
|
||||
.iter()
|
||||
.map(|h| inner.remove_handle(*h))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Get the kernel object corresponding to this `handle_value`
|
||||
pub fn get_object<T: KernelObject>(&self, handle_value: HandleValue) -> ZxResult<Arc<T>> {
|
||||
let handle = self.get_handle(handle_value)?;
|
||||
let object = handle
|
||||
.object
|
||||
.downcast_arc::<T>()
|
||||
.map_err(|_| ZxError::WRONG_TYPE)?;
|
||||
Ok(object)
|
||||
}
|
||||
|
||||
/// 根据句柄值查找内核对象,并检查权限
|
||||
pub fn get_object_with_rights<T: KernelObject>(
|
||||
&self,
|
||||
handle_value: HandleValue,
|
||||
desired_rights: Rights,
|
||||
) -> ZxResult<Arc<T>> {
|
||||
let handle = self.get_handle(handle_value)?;
|
||||
// check type before rights
|
||||
let object = handle
|
||||
.object
|
||||
.downcast_arc::<T>()
|
||||
.map_err(|_| ZxError::WRONG_TYPE)?;
|
||||
if !handle.rights.contains(desired_rights) {
|
||||
return Err(ZxError::ACCESS_DENIED);
|
||||
}
|
||||
Ok(object)
|
||||
}
|
||||
|
||||
/// Get the kernel object corresponding to this `handle_value` and this handle's rights.
|
||||
pub fn get_object_and_rights<T: KernelObject>(
|
||||
&self,
|
||||
handle_value: HandleValue,
|
||||
) -> ZxResult<(Arc<T>, Rights)> {
|
||||
let handle = self.get_handle(handle_value)?;
|
||||
let object = handle
|
||||
.object
|
||||
.downcast_arc::<T>()
|
||||
.map_err(|_| ZxError::WRONG_TYPE)?;
|
||||
Ok((object, handle.rights))
|
||||
}
|
||||
|
||||
/// Remove a handle referring to a kernel object of the given type from the process.
|
||||
pub fn remove_object<T: KernelObject>(&self, handle_value: HandleValue) -> ZxResult<Arc<T>> {
|
||||
let handle = self.remove_handle(handle_value)?;
|
||||
let object = handle
|
||||
.object
|
||||
.downcast_arc::<T>()
|
||||
.map_err(|_| ZxError::WRONG_TYPE)?;
|
||||
Ok(object)
|
||||
}
|
||||
|
||||
pub fn start(
|
||||
&self,
|
||||
thread: &Arc<Thread>,
|
||||
entry: usize,
|
||||
stack: usize,
|
||||
arg1: Option<Handle>,
|
||||
arg2: usize,
|
||||
thread_fn: ThreadFn,
|
||||
) -> ZxResult {
|
||||
let handle_value;
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
if !inner.contains_thread(thread) {
|
||||
return Err(ZxError::ACCESS_DENIED);
|
||||
}
|
||||
if inner.status != Status::Init {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
inner.status = Status::Running;
|
||||
handle_value = arg1.map_or(INVALID_HANDLE, |handle| inner.add_handle(handle));
|
||||
}
|
||||
thread.set_first_thread();
|
||||
match thread.start(entry, stack, handle_value as usize, arg2, thread_fn) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(err) => {
|
||||
let mut inner = self.inner.lock();
|
||||
if handle_value != INVALID_HANDLE {
|
||||
inner.remove_handle(handle_value).ok();
|
||||
}
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Exit current process with `retcode`.
|
||||
/// The process do not terminate immediately when exited.
|
||||
/// It will terminate after all its child threads are terminated.
|
||||
pub fn exit(&self, retcode: i64) {
|
||||
let mut inner = self.inner.lock();
|
||||
if let Status::Exited(_) = inner.status {
|
||||
return;
|
||||
}
|
||||
inner.status = Status::Exited(retcode);
|
||||
if inner.threads.is_empty() {
|
||||
inner.handles.clear();
|
||||
drop(inner);
|
||||
self.terminate();
|
||||
return;
|
||||
}
|
||||
for thread in inner.threads.iter() {
|
||||
thread.kill();
|
||||
}
|
||||
inner.handles.clear();
|
||||
}
|
||||
|
||||
/// The process finally terminates.
|
||||
fn terminate(&self) {
|
||||
let mut inner = self.inner.lock();
|
||||
let _retcode = match inner.status {
|
||||
Status::Exited(retcode) => retcode,
|
||||
_ => {
|
||||
inner.status = Status::Exited(0);
|
||||
0
|
||||
}
|
||||
};
|
||||
self.job.remove_process(self.base.id);
|
||||
}
|
||||
|
||||
/// Check whether `condition` is allowed in the parent job's policy.
|
||||
pub fn check_policy(&self, condition: PolicyCondition) -> ZxResult {
|
||||
match self
|
||||
.policy
|
||||
.get_action(condition)
|
||||
.unwrap_or(PolicyAction::Allow)
|
||||
{
|
||||
PolicyAction::Allow => Ok(()),
|
||||
PolicyAction::Deny => Err(ZxError::ACCESS_DENIED),
|
||||
_ => unimplemented!(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get process status.
|
||||
pub fn status(&self) -> Status {
|
||||
self.inner.lock().status
|
||||
}
|
||||
|
||||
/// Get the `VmAddressRegion` of the process.
|
||||
pub fn vmar(&self) -> Arc<VmAddressRegion> {
|
||||
self.vmar.clone()
|
||||
}
|
||||
|
||||
/// Get the job of the process.
|
||||
pub fn job(&self) -> Arc<Job> {
|
||||
self.job.clone()
|
||||
}
|
||||
|
||||
/// Add a thread to the process.
|
||||
pub(super) fn add_thread(&self, thread: Arc<Thread>) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
if let Status::Exited(_) = inner.status {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
inner.threads.push(thread);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove a thread from the process.
|
||||
///
|
||||
/// If no more threads left, exit the process.
|
||||
pub(super) fn remove_thread(&self, tid: KoID) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.threads.retain(|t| t.id() != tid);
|
||||
if inner.threads.is_empty() {
|
||||
drop(inner);
|
||||
self.terminate();
|
||||
}
|
||||
}
|
||||
|
||||
/// Get KoIDs of Threads.
|
||||
pub fn thread_ids(&self) -> Vec<KoID> {
|
||||
self.inner.lock().threads.iter().map(|t| t.id()).collect()
|
||||
}
|
||||
|
||||
/// Get information of this process.
|
||||
pub fn get_info(&self) -> ProcessInfo {
|
||||
let mut info = ProcessInfo {
|
||||
..Default::default()
|
||||
};
|
||||
match self.inner.lock().status {
|
||||
Status::Init => {
|
||||
info.started = false;
|
||||
info.has_exited = false;
|
||||
}
|
||||
Status::Running => {
|
||||
info.started = true;
|
||||
info.has_exited = false;
|
||||
}
|
||||
Status::Exited(ret) => {
|
||||
info.return_code = ret;
|
||||
info.has_exited = true;
|
||||
info.started = true;
|
||||
}
|
||||
}
|
||||
info
|
||||
}
|
||||
}
|
||||
|
||||
impl Process {
|
||||
pub fn wait_for_end(self: Arc<Self>) -> impl Future<Output = i64> {
|
||||
struct ProcessEndFuture {
|
||||
proc: Arc<Process>,
|
||||
}
|
||||
impl Future for ProcessEndFuture {
|
||||
type Output = i64;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
if let Status::Exited(exit_code) = self.proc.status() {
|
||||
Poll::Ready(exit_code)
|
||||
} else {
|
||||
let waker = cx.waker().clone();
|
||||
waker.wake_by_ref();
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
ProcessEndFuture {
|
||||
proc: Arc::clone(&self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Information of a process.
|
||||
#[allow(missing_docs)]
|
||||
#[repr(C)]
|
||||
#[derive(Default)]
|
||||
pub struct ProcessInfo {
|
||||
pub return_code: i64,
|
||||
pub started: bool,
|
||||
pub has_exited: bool,
|
||||
}
|
||||
|
||||
impl Task for Process {
|
||||
fn kill(&self) {
|
||||
self.exit(TASK_RETCODE_SYSCALL_KILL);
|
||||
}
|
||||
|
||||
fn suspend(&self) {
|
||||
let inner = self.inner.lock();
|
||||
for thread in inner.threads.iter() {
|
||||
thread.suspend();
|
||||
}
|
||||
}
|
||||
|
||||
fn resume(&self) {
|
||||
let inner = self.inner.lock();
|
||||
for thread in inner.threads.iter() {
|
||||
thread.resume();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ProcessInner {
|
||||
/// Add a handle to the process
|
||||
fn add_handle(&mut self, handle: Handle) -> HandleValue {
|
||||
let key = (self.max_handle_id << 2) | 0x3u32;
|
||||
self.max_handle_id += 1;
|
||||
self.handles.insert(key, handle);
|
||||
key
|
||||
}
|
||||
|
||||
fn remove_handle(&mut self, handle_value: HandleValue) -> ZxResult<Handle> {
|
||||
let handle = self
|
||||
.handles
|
||||
.remove(&handle_value)
|
||||
.ok_or(ZxError::BAD_HANDLE)?;
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
fn get_handle(&mut self, handle_value: HandleValue) -> ZxResult<Handle> {
|
||||
let handle = self.handles.get(&handle_value).ok_or(ZxError::BAD_HANDLE)?;
|
||||
Ok(handle.clone())
|
||||
}
|
||||
|
||||
/// Whether `thread` is in this process.
|
||||
fn contains_thread(&self, thread: &Arc<Thread>) -> bool {
|
||||
self.threads.iter().any(|t| Arc::ptr_eq(t, thread))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn create() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
|
||||
assert_eq!(proc.related_koid(), root_job.id());
|
||||
assert!(Arc::ptr_eq(&root_job, &proc.job()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handle() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
|
||||
|
||||
let handle_value = proc.add_handle(handle);
|
||||
|
||||
// getting object should success
|
||||
let object: Arc<Process> = proc
|
||||
.get_object_with_rights(handle_value, Rights::DEFAULT_PROCESS)
|
||||
.expect("failed to get object");
|
||||
assert!(Arc::ptr_eq(&object, &proc));
|
||||
|
||||
let (object, rights) = proc
|
||||
.get_object_and_rights::<Process>(handle_value)
|
||||
.expect("failed to get object");
|
||||
assert!(Arc::ptr_eq(&object, &proc));
|
||||
assert_eq!(rights, Rights::DEFAULT_PROCESS);
|
||||
|
||||
// getting object with an extra rights should fail.
|
||||
assert_eq!(
|
||||
proc.get_object_with_rights::<Process>(handle_value, Rights::MANAGE_JOB)
|
||||
.err(),
|
||||
Some(ZxError::ACCESS_DENIED)
|
||||
);
|
||||
|
||||
// getting object with invalid type should fail.
|
||||
assert_eq!(
|
||||
proc.get_object_with_rights::<Job>(handle_value, Rights::DEFAULT_PROCESS)
|
||||
.err(),
|
||||
Some(ZxError::WRONG_TYPE)
|
||||
);
|
||||
|
||||
proc.remove_handle(handle_value).unwrap();
|
||||
|
||||
// getting object with invalid handle should fail.
|
||||
assert_eq!(
|
||||
proc.get_object_with_rights::<Process>(handle_value, Rights::DEFAULT_PROCESS)
|
||||
.err(),
|
||||
Some(ZxError::BAD_HANDLE)
|
||||
);
|
||||
|
||||
let handle1 = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
|
||||
let handle2 = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
|
||||
|
||||
let handle_values = proc.add_handles(vec![handle1, handle2]);
|
||||
let object1: Arc<Process> = proc
|
||||
.get_object_with_rights(handle_values[0], Rights::DEFAULT_PROCESS)
|
||||
.expect("failed to get object");
|
||||
assert!(Arc::ptr_eq(&object1, &proc));
|
||||
|
||||
proc.remove_handles(&handle_values).unwrap();
|
||||
assert_eq!(
|
||||
proc.get_object_with_rights::<Process>(handle_values[0], Rights::DEFAULT_PROCESS)
|
||||
.err(),
|
||||
Some(ZxError::BAD_HANDLE)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
assert_eq!(proc.get_child(thread.id()).unwrap().id(), thread.id());
|
||||
assert_eq!(proc.get_child(proc.id()).err(), Some(ZxError::NOT_FOUND));
|
||||
|
||||
let thread1 = Thread::create(&proc, "thread1").expect("failed to create thread");
|
||||
assert_eq!(proc.thread_ids(), vec![thread.id(), thread1.id()]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn contains_thread() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
let proc1 = Process::create(&root_job, "proc1").expect("failed to create process");
|
||||
let thread1 = Thread::create(&proc1, "thread1").expect("failed to create thread");
|
||||
|
||||
let inner = proc.inner.lock();
|
||||
assert!(inner.contains_thread(&thread) && !inner.contains_thread(&thread1));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_policy() {
|
||||
let root_job = Job::root();
|
||||
let policy1 = BasicPolicy {
|
||||
condition: PolicyCondition::BadHandle,
|
||||
action: PolicyAction::Allow,
|
||||
};
|
||||
let policy2 = BasicPolicy {
|
||||
condition: PolicyCondition::NewChannel,
|
||||
action: PolicyAction::Deny,
|
||||
};
|
||||
|
||||
assert!(root_job
|
||||
.set_policy_basic(SetPolicyOptions::Absolute, &[policy1, policy2])
|
||||
.is_ok());
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
|
||||
assert!(proc.check_policy(PolicyCondition::BadHandle).is_ok());
|
||||
assert!(proc.check_policy(PolicyCondition::NewProcess).is_ok());
|
||||
assert_eq!(
|
||||
proc.check_policy(PolicyCondition::NewChannel).err(),
|
||||
Some(ZxError::ACCESS_DENIED)
|
||||
);
|
||||
|
||||
let _job = root_job.create_child().unwrap();
|
||||
assert_eq!(
|
||||
root_job
|
||||
.set_policy_basic(SetPolicyOptions::Absolute, &[policy1, policy2])
|
||||
.err(),
|
||||
Some(ZxError::BAD_STATE)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exit() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
let info = proc.get_info();
|
||||
assert!(!info.has_exited && !info.started && info.return_code == 0);
|
||||
|
||||
proc.exit(666);
|
||||
let info = proc.get_info();
|
||||
assert!(info.has_exited && info.started && info.return_code == 666);
|
||||
assert_eq!(thread.state(), ThreadState::Dying);
|
||||
// TODO: when is the thread dead?
|
||||
|
||||
assert_eq!(
|
||||
Thread::create(&proc, "thread1").err(),
|
||||
Some(ZxError::BAD_STATE)
|
||||
);
|
||||
}
|
||||
}
|
||||
@ -1,567 +0,0 @@
|
||||
use {
|
||||
super::process::Process,
|
||||
super::*,
|
||||
crate::object::*,
|
||||
alloc::{boxed::Box, sync::Arc},
|
||||
bitflags::bitflags,
|
||||
core::{
|
||||
future::Future,
|
||||
ops::Deref,
|
||||
pin::Pin,
|
||||
task::{Context, Poll, Waker},
|
||||
},
|
||||
spin::Mutex,
|
||||
trapframe::UserContext,
|
||||
};
|
||||
|
||||
pub use self::thread_state::*;
|
||||
|
||||
mod thread_state;
|
||||
|
||||
pub struct Thread {
|
||||
base: KObjectBase,
|
||||
proc: Arc<Process>,
|
||||
inner: Mutex<ThreadInner>,
|
||||
}
|
||||
|
||||
impl_kobject!(Thread
|
||||
fn related_koid(&self) -> KoID {
|
||||
self.proc.id()
|
||||
}
|
||||
);
|
||||
|
||||
#[derive(Default)]
|
||||
struct ThreadInner {
|
||||
/// Thread context
|
||||
///
|
||||
/// It will be taken away when running this thread.
|
||||
context: Option<Box<UserContext>>,
|
||||
/// The number of existing `SuspendToken`.
|
||||
suspend_count: usize,
|
||||
/// The waker of task when suspending.
|
||||
waker: Option<Waker>,
|
||||
/// Thread state
|
||||
///
|
||||
/// NOTE: This variable will never be `Suspended`. On suspended, the
|
||||
/// `suspend_count` is non-zero, and this represents the state before suspended.
|
||||
state: ThreadState,
|
||||
/// Should The ProcessStarting exception generated at start of this thread
|
||||
first_thread: bool,
|
||||
/// Should The ThreadExiting exception do not block this thread
|
||||
killed: bool,
|
||||
/// The time this thread has run on cpu
|
||||
time: u128,
|
||||
flags: ThreadFlag,
|
||||
}
|
||||
|
||||
impl ThreadInner {
|
||||
fn state(&self) -> ThreadState {
|
||||
// Dying > Exception > Suspend > Blocked
|
||||
if self.suspend_count == 0
|
||||
|| self.context.is_none()
|
||||
|| self.state == ThreadState::BlockedException
|
||||
|| self.state == ThreadState::Dying
|
||||
|| self.state == ThreadState::Dead
|
||||
{
|
||||
self.state
|
||||
} else {
|
||||
ThreadState::Suspended
|
||||
}
|
||||
}
|
||||
|
||||
/// Change state and update signal.
|
||||
fn change_state(&mut self, state: ThreadState) {
|
||||
self.state = state;
|
||||
}
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
/// Thread flags.
|
||||
#[derive(Default)]
|
||||
pub struct ThreadFlag: usize {
|
||||
/// The thread currently has a VCPU.
|
||||
const VCPU = 1 << 3;
|
||||
}
|
||||
}
|
||||
|
||||
/// The type of a new thread function.
|
||||
pub type ThreadFn = fn(thread: CurrentThread) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
|
||||
|
||||
impl Thread {
|
||||
/// Create a new thread.
|
||||
pub fn create(proc: &Arc<Process>, name: &str) -> ZxResult<Arc<Self>> {
|
||||
let thread = Arc::new(Thread {
|
||||
base: KObjectBase::with_name(name),
|
||||
proc: proc.clone(),
|
||||
inner: Mutex::new(ThreadInner {
|
||||
context: Some(Box::new(UserContext::default())),
|
||||
..Default::default()
|
||||
}),
|
||||
});
|
||||
proc.add_thread(thread.clone())?;
|
||||
Ok(thread)
|
||||
}
|
||||
|
||||
/// Get the process.
|
||||
pub fn proc(&self) -> &Arc<Process> {
|
||||
&self.proc
|
||||
}
|
||||
|
||||
/// Start execution on the thread.
|
||||
pub fn start(
|
||||
self: &Arc<Self>,
|
||||
entry: usize,
|
||||
stack: usize,
|
||||
arg1: usize,
|
||||
arg2: usize,
|
||||
thread_fn: ThreadFn,
|
||||
) -> ZxResult {
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
context.general.rip = entry;
|
||||
context.general.rsp = stack;
|
||||
context.general.rdi = arg1;
|
||||
context.general.rsi = arg2;
|
||||
context.general.rflags |= 0x3202;
|
||||
inner.change_state(ThreadState::Running);
|
||||
}
|
||||
kernel_hal::Thread::spawn(thread_fn(CurrentThread(self.clone())), 0);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stop the thread. Internal implementation of `exit` and `kill`.
|
||||
///
|
||||
/// The thread do not terminate immediately when stopped. It is just made dying.
|
||||
/// It will terminate after some cleanups (when `terminate` are called **explicitly** by upper layer).
|
||||
fn stop(&self, killed: bool) {
|
||||
let mut inner = self.inner.lock();
|
||||
if inner.state == ThreadState::Dead {
|
||||
return;
|
||||
}
|
||||
if killed {
|
||||
inner.killed = true;
|
||||
}
|
||||
if inner.state == ThreadState::Dying {
|
||||
return;
|
||||
}
|
||||
inner.change_state(ThreadState::Dying);
|
||||
if let Some(waker) = inner.waker.take() {
|
||||
waker.wake();
|
||||
}
|
||||
}
|
||||
|
||||
/// Read one aspect of thread state.
|
||||
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
|
||||
let inner = self.inner.lock();
|
||||
let state = inner.state();
|
||||
if state != ThreadState::BlockedException && state != ThreadState::Suspended {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
let context = inner.context.as_ref().ok_or(ZxError::BAD_STATE)?;
|
||||
context.read_state(kind, buf)
|
||||
}
|
||||
|
||||
/// Write one aspect of thread state.
|
||||
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
let state = inner.state();
|
||||
if state != ThreadState::BlockedException && state != ThreadState::Suspended {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
context.write_state(kind, buf)
|
||||
}
|
||||
|
||||
/// Get the thread's information.
|
||||
pub fn get_thread_info(&self) -> ThreadInfo {
|
||||
let inner = self.inner.lock();
|
||||
ThreadInfo {
|
||||
state: inner.state() as u32,
|
||||
}
|
||||
}
|
||||
/// Get the thread state.
|
||||
pub fn state(&self) -> ThreadState {
|
||||
self.inner.lock().state()
|
||||
}
|
||||
|
||||
/// Add the parameter to the time this thread has run on cpu.
|
||||
pub fn time_add(&self, time: u128) {
|
||||
self.inner.lock().time += time;
|
||||
}
|
||||
|
||||
/// Get the time this thread has run on cpu.
|
||||
pub fn get_time(&self) -> u64 {
|
||||
self.inner.lock().time as u64
|
||||
}
|
||||
|
||||
/// Set this thread as the first thread of a process.
|
||||
pub(super) fn set_first_thread(&self) {
|
||||
self.inner.lock().first_thread = true;
|
||||
}
|
||||
|
||||
/// Whether this thread is the first thread of a process.
|
||||
pub fn is_first_thread(&self) -> bool {
|
||||
self.inner.lock().first_thread
|
||||
}
|
||||
|
||||
/// Get the thread's flags.
|
||||
pub fn flags(&self) -> ThreadFlag {
|
||||
self.inner.lock().flags
|
||||
}
|
||||
|
||||
/// Apply `f` to the thread's flags.
|
||||
pub fn update_flags(&self, f: impl FnOnce(&mut ThreadFlag)) {
|
||||
f(&mut self.inner.lock().flags)
|
||||
}
|
||||
|
||||
/// Set the thread local fsbase register on x86_64.
|
||||
pub fn set_fsbase(&self, fsbase: usize) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
context.general.fsbase = fsbase;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Set the thread local gsbase register on x86_64.
|
||||
pub fn set_gsbase(&self, gsbase: usize) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
context.general.gsbase = gsbase;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Task for Thread {
|
||||
fn kill(&self) {
|
||||
self.stop(true)
|
||||
}
|
||||
|
||||
fn suspend(&self) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.suspend_count += 1;
|
||||
// let state = inner.state;
|
||||
// inner.change_state(state);
|
||||
}
|
||||
|
||||
fn resume(&self) {
|
||||
let mut inner = self.inner.lock();
|
||||
assert_ne!(inner.suspend_count, 0);
|
||||
inner.suspend_count -= 1;
|
||||
if inner.suspend_count == 0 {
|
||||
// let state = inner.state;
|
||||
// inner.change_state(state);
|
||||
if let Some(waker) = inner.waker.take() {
|
||||
waker.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A handle to current thread.
|
||||
///
|
||||
/// This is a wrapper of [`Thread`] that provides additional methods for the thread runner.
|
||||
/// It can only be obtained from the argument of `thread_fn` in a new thread started by [`Thread::start`].
|
||||
///
|
||||
/// It will terminate current thread on drop.
|
||||
///
|
||||
/// [`Thread`]: crate::task::Thread
|
||||
/// [`Thread::start`]: crate::task::Thread::start
|
||||
pub struct CurrentThread(pub(super) Arc<Thread>);
|
||||
|
||||
impl Deref for CurrentThread {
|
||||
type Target = Arc<Thread>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CurrentThread {
|
||||
/// Terminate the current running thread.
|
||||
fn drop(&mut self) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.change_state(ThreadState::Dead);
|
||||
self.proc().remove_thread(self.base.id);
|
||||
}
|
||||
}
|
||||
|
||||
impl CurrentThread {
|
||||
/// Exit the current thread.
|
||||
///
|
||||
/// The thread do not terminate immediately when exited. It is just made dying.
|
||||
/// It will terminate after some cleanups on this struct drop.
|
||||
pub fn exit(&self) {
|
||||
self.stop(false);
|
||||
}
|
||||
|
||||
/// Wait until the thread is ready to run (not suspended),
|
||||
/// and then take away its context to run the thread.
|
||||
pub fn wait_for_run(&self) -> impl Future<Output = Box<UserContext>> {
|
||||
#[must_use = "wait_for_run does nothing unless polled/`await`-ed"]
|
||||
struct RunnableChecker {
|
||||
thread: Arc<Thread>,
|
||||
}
|
||||
impl Future for RunnableChecker {
|
||||
type Output = Box<UserContext>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||
let mut inner = self.thread.inner.lock();
|
||||
if inner.state() != ThreadState::Suspended {
|
||||
// resume: return the context token from thread object
|
||||
// There is no need to call change_state here
|
||||
// since take away the context of a non-suspended thread won't change it's state
|
||||
Poll::Ready(inner.context.take().unwrap())
|
||||
} else {
|
||||
// suspend: put waker into the thread object
|
||||
inner.waker = Some(cx.waker().clone());
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
RunnableChecker {
|
||||
thread: self.0.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// The thread ends running and hands the context back to the thread object.
|
||||
pub fn end_running(&self, context: Box<UserContext>) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.context = Some(context);
|
||||
// let state = inner.state;
|
||||
// inner.change_state(state);
|
||||
}
|
||||
|
||||
/// Access the saved context of the current thread.
|
||||
///
|
||||
/// Panics if the context is not available.
|
||||
pub fn with_context<T, F>(&self, f: F) -> T
|
||||
where
|
||||
F: FnOnce(&mut UserContext) -> T,
|
||||
{
|
||||
let mut inner = self.inner.lock();
|
||||
let mut cx = inner.context.as_mut().unwrap();
|
||||
f(&mut cx)
|
||||
}
|
||||
}
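
// A minimal sketch (not in the original source) of how a thread runner could
// drive a `CurrentThread` with the methods above: wait until the thread is
// runnable, enter user mode with the taken context, then hand the context
// back. `enter_user_mode` is a hypothetical stand-in for the real HAL call
// that runs a `UserContext`; the exit condition is simplified.
#[allow(dead_code)]
async fn example_runner(thread: CurrentThread) {
    while thread.state() != ThreadState::Dying && thread.state() != ThreadState::Dead {
        // Blocks while the thread is suspended; yields the saved context otherwise.
        let mut context = thread.wait_for_run().await;
        enter_user_mode(&mut context); // hypothetical: run user code until trap/syscall
        thread.end_running(context);
    }
    // Dropping `thread` marks it dead and removes it from its process.
}

#[allow(dead_code)]
fn enter_user_mode(_context: &mut UserContext) {
    // Placeholder for the real HAL context-switch routine.
}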
|
||||
|
||||
/// The thread state.
|
||||
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
|
||||
pub enum ThreadState {
|
||||
/// The thread has been created but it has not started running yet.
|
||||
New = 0,
|
||||
/// The thread is running user code normally.
|
||||
Running = 1,
|
||||
/// Stopped due to `zx_task_suspend()`.
|
||||
Suspended = 2,
|
||||
/// In a syscall or handling an exception.
|
||||
Blocked = 3,
|
||||
/// The thread is in the process of being terminated, but it has not been stopped yet.
|
||||
Dying = 4,
|
||||
/// The thread has stopped running.
|
||||
Dead = 5,
|
||||
/// The thread is stopped in an exception.
|
||||
BlockedException = 0x103,
|
||||
/// The thread is stopped in `zx_nanosleep()`.
|
||||
BlockedSleeping = 0x203,
|
||||
/// The thread is stopped in `zx_futex_wait()`.
|
||||
BlockedFutex = 0x303,
|
||||
/// The thread is stopped in `zx_port_wait()`.
|
||||
BlockedPort = 0x403,
|
||||
/// The thread is stopped in `zx_channel_call()`.
|
||||
BlockedChannel = 0x503,
|
||||
/// The thread is stopped in `zx_object_wait_one()`.
|
||||
BlockedWaitOne = 0x603,
|
||||
/// The thread is stopped in `zx_object_wait_many()`.
|
||||
BlockedWaitMany = 0x703,
|
||||
/// The thread is stopped in `zx_interrupt_wait()`.
|
||||
BlockedInterrupt = 0x803,
|
||||
/// The thread is stopped waiting on a pager.
|
||||
BlockedPager = 0x903,
|
||||
}
|
||||
|
||||
impl Default for ThreadState {
|
||||
fn default() -> Self {
|
||||
ThreadState::New
|
||||
}
|
||||
}
|
||||
|
||||
/// The thread information.
|
||||
#[repr(C)]
|
||||
pub struct ThreadInfo {
|
||||
state: u32,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::job::Job;
|
||||
use super::*;
|
||||
use core::time::Duration;
|
||||
use kernel_hal::timer_now;
|
||||
use kernel_hal::GeneralRegs;
|
||||
|
||||
#[test]
|
||||
fn create() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
assert_eq!(thread.flags(), ThreadFlag::empty());
|
||||
|
||||
assert_eq!(thread.related_koid(), proc.id());
|
||||
let child = proc.get_child(thread.id()).unwrap().downcast_arc().unwrap();
|
||||
assert!(Arc::ptr_eq(&child, &thread));
|
||||
}
|
||||
|
||||
#[async_std::test]
|
||||
async fn start() {
|
||||
kernel_hal_unix::init();
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
let thread1 = Thread::create(&proc, "thread1").expect("failed to create thread");
|
||||
|
||||
// function for new thread
|
||||
async fn new_thread(thread: CurrentThread) {
|
||||
let cx = thread.wait_for_run().await;
|
||||
assert_eq!(cx.general.rip, 1);
|
||||
assert_eq!(cx.general.rsp, 4);
|
||||
assert_eq!(cx.general.rdi, 3);
|
||||
assert_eq!(cx.general.rsi, 2);
|
||||
async_std::task::sleep(Duration::from_millis(10)).await;
|
||||
thread.end_running(cx);
|
||||
}
|
||||
|
||||
// start a new thread
|
||||
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
|
||||
proc.start(&thread, 1, 4, Some(handle.clone()), 2, |thread| {
|
||||
Box::pin(new_thread(thread))
|
||||
})
|
||||
.expect("failed to start thread");
|
||||
|
||||
// check info and state
|
||||
let info = proc.get_info();
|
||||
assert!(info.started && !info.has_exited && info.return_code == 0);
|
||||
assert_eq!(proc.status(), Status::Running);
|
||||
assert_eq!(thread.state(), ThreadState::Running);
|
||||
|
||||
// start again should fail
|
||||
assert_eq!(
|
||||
proc.start(&thread, 1, 4, Some(handle.clone()), 2, |thread| Box::pin(
|
||||
new_thread(thread)
|
||||
)),
|
||||
Err(ZxError::BAD_STATE)
|
||||
);
|
||||
|
||||
// start another thread should fail
|
||||
assert_eq!(
|
||||
proc.start(&thread1, 1, 4, Some(handle.clone()), 2, |thread| Box::pin(
|
||||
new_thread(thread)
|
||||
)),
|
||||
Err(ZxError::BAD_STATE)
|
||||
);
|
||||
|
||||
// wait 100ms for the new thread to exit
|
||||
async_std::task::sleep(core::time::Duration::from_millis(100)).await;
|
||||
|
||||
// no other references to `Thread`
|
||||
assert_eq!(Arc::strong_count(&thread), 1);
|
||||
assert_eq!(thread.state(), ThreadState::Dead);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn info() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
let info = thread.get_thread_info();
|
||||
assert!(info.state == thread.state() as u32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_write_state() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
const SIZE: usize = core::mem::size_of::<GeneralRegs>();
|
||||
let mut buf = [0; 10];
|
||||
assert_eq!(
|
||||
thread.read_state(ThreadStateKind::General, &mut buf).err(),
|
||||
Some(ZxError::BAD_STATE)
|
||||
);
|
||||
assert_eq!(
|
||||
thread.write_state(ThreadStateKind::General, &buf).err(),
|
||||
Some(ZxError::BAD_STATE)
|
||||
);
|
||||
|
||||
thread.suspend();
|
||||
|
||||
assert_eq!(
|
||||
thread.read_state(ThreadStateKind::General, &mut buf).err(),
|
||||
Some(ZxError::BUFFER_TOO_SMALL)
|
||||
);
|
||||
assert_eq!(
|
||||
thread.write_state(ThreadStateKind::General, &buf).err(),
|
||||
Some(ZxError::BUFFER_TOO_SMALL)
|
||||
);
|
||||
|
||||
let mut buf = [0; SIZE];
|
||||
assert!(thread
|
||||
.read_state(ThreadStateKind::General, &mut buf)
|
||||
.is_ok());
|
||||
assert!(thread.write_state(ThreadStateKind::General, &buf).is_ok());
|
||||
// TODO
|
||||
}
|
||||
|
||||
#[async_std::test]
|
||||
async fn wait_for_run() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
assert_eq!(thread.state(), ThreadState::New);
|
||||
|
||||
thread
|
||||
.start(0, 0, 0, 0, |thread| Box::pin(new_thread(thread)))
|
||||
.unwrap();
|
||||
async fn new_thread(thread: CurrentThread) {
|
||||
assert_eq!(thread.state(), ThreadState::Running);
|
||||
|
||||
// without suspend
|
||||
let context = thread.wait_for_run().await;
|
||||
thread.end_running(context);
|
||||
|
||||
// with suspend
|
||||
thread.suspend();
|
||||
thread.suspend();
|
||||
assert_eq!(thread.state(), ThreadState::Suspended);
|
||||
async_std::task::spawn({
|
||||
let thread = (*thread).clone();
|
||||
async move {
|
||||
async_std::task::sleep(Duration::from_millis(10)).await;
|
||||
thread.resume();
|
||||
async_std::task::sleep(Duration::from_millis(10)).await;
|
||||
thread.resume();
|
||||
}
|
||||
});
|
||||
let time = timer_now();
|
||||
let _context = thread.wait_for_run().await;
|
||||
assert!(timer_now() - time >= Duration::from_millis(20));
|
||||
}
|
||||
// FIX ME
|
||||
// let thread: Arc<dyn KernelObject> = thread;
|
||||
// thread.wait_signal(Signal::THREAD_TERMINATED).await;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn time() {
|
||||
let root_job = Job::root();
|
||||
let proc = Process::create(&root_job, "proc").expect("failed to create process");
|
||||
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
|
||||
|
||||
assert_eq!(thread.get_time(), 0);
|
||||
thread.time_add(10);
|
||||
assert_eq!(thread.get_time(), 10);
|
||||
}
|
||||
}
|
||||
@ -1,64 +0,0 @@
|
||||
use crate::{ZxError, ZxResult};
|
||||
use kernel_hal::UserContext;
|
||||
use numeric_enum_macro::numeric_enum;
|
||||
|
||||
numeric_enum! {
|
||||
#[repr(u32)]
|
||||
/// Possible values for "kind" in zx_thread_read_state and zx_thread_write_state.
|
||||
#[allow(missing_docs)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum ThreadStateKind {
|
||||
General = 0,
|
||||
FS = 6,
|
||||
GS = 7,
|
||||
}
|
||||
}
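
// Sketch (not in the original source): `numeric_enum!` is expected to derive a
// conversion from the raw u32 "kind" argument of the thread-state syscalls to
// `ThreadStateKind`; the exact derived traits are an assumption here.
#[allow(dead_code)]
fn example_parse_kind(raw: u32) -> ZxResult<ThreadStateKind> {
    use core::convert::TryFrom;
    ThreadStateKind::try_from(raw).map_err(|_| ZxError::INVALID_ARGS)
}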
|
||||
|
||||
pub(super) trait ContextExt {
|
||||
fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize>;
|
||||
fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult;
|
||||
}
|
||||
|
||||
impl ContextExt for UserContext {
|
||||
fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
|
||||
match kind {
|
||||
ThreadStateKind::General => buf.write_struct(&self.general),
|
||||
ThreadStateKind::FS => buf.write_struct(&self.general.fsbase),
|
||||
ThreadStateKind::GS => buf.write_struct(&self.general.gsbase),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult {
|
||||
match kind {
|
||||
ThreadStateKind::General => self.general = buf.read_struct()?,
|
||||
ThreadStateKind::FS => self.general.fsbase = buf.read_struct()?,
|
||||
ThreadStateKind::GS => self.general.gsbase = buf.read_struct()?,
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
trait BufExt {
|
||||
fn read_struct<T>(&self) -> ZxResult<T>;
|
||||
fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize>;
|
||||
}
|
||||
|
||||
#[allow(unsafe_code)]
|
||||
impl BufExt for [u8] {
|
||||
fn read_struct<T>(&self) -> ZxResult<T> {
|
||||
if self.len() < core::mem::size_of::<T>() {
|
||||
return Err(ZxError::BUFFER_TOO_SMALL);
|
||||
}
|
||||
Ok(unsafe { (self.as_ptr() as *const T).read() })
|
||||
}
|
||||
|
||||
fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize> {
|
||||
if self.len() < core::mem::size_of::<T>() {
|
||||
return Err(ZxError::BUFFER_TOO_SMALL);
|
||||
}
|
||||
unsafe {
|
||||
*(self.as_mut_ptr() as *mut T) = *value;
|
||||
}
|
||||
Ok(core::mem::size_of::<T>())
|
||||
}
|
||||
}
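
// Sketch (not in the original source): round-trip the general registers of a
// `UserContext` through a byte buffer using the helpers above. The buffer must
// be at least `size_of::<GeneralRegs>()` bytes, otherwise BUFFER_TOO_SMALL is
// returned; `GeneralRegs` is assumed to be re-exported by kernel-hal as in the
// thread tests.
#[allow(dead_code)]
fn example_roundtrip_general(context: &mut UserContext) -> ZxResult {
    use kernel_hal::GeneralRegs;
    let mut buf = [0u8; core::mem::size_of::<GeneralRegs>()];
    // Serialize the general registers, then write them straight back.
    context.read_state(ThreadStateKind::General, &mut buf)?;
    context.write_state(ThreadStateKind::General, &buf)?;
    Ok(())
}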
|
||||
@ -1,102 +0,0 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
/// Given a range, iterates over the sub-range that falls within each block.
|
||||
pub struct BlockIter {
|
||||
pub begin: usize,
|
||||
pub end: usize,
|
||||
pub block_size_log2: u8,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
pub struct BlockRange {
|
||||
pub block: usize,
|
||||
pub begin: usize,
|
||||
pub end: usize,
|
||||
pub block_size_log2: u8,
|
||||
}
|
||||
|
||||
impl BlockRange {
|
||||
pub fn len(&self) -> usize {
|
||||
self.end - self.begin
|
||||
}
|
||||
pub fn is_full(&self) -> bool {
|
||||
self.len() == (1usize << self.block_size_log2)
|
||||
}
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
pub fn origin_begin(&self) -> usize {
|
||||
(self.block << self.block_size_log2) + self.begin
|
||||
}
|
||||
pub fn origin_end(&self) -> usize {
|
||||
(self.block << self.block_size_log2) + self.end
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for BlockIter {
|
||||
type Item = BlockRange;
|
||||
|
||||
fn next(&mut self) -> Option<<Self as Iterator>::Item> {
|
||||
if self.begin >= self.end {
|
||||
return None;
|
||||
}
|
||||
let block_size_log2 = self.block_size_log2;
|
||||
let block_size = 1usize << self.block_size_log2;
|
||||
let block = self.begin / block_size;
|
||||
let begin = self.begin % block_size;
|
||||
let end = if block == self.end / block_size {
|
||||
self.end % block_size
|
||||
} else {
|
||||
block_size
|
||||
};
|
||||
self.begin += end - begin;
|
||||
Some(BlockRange {
|
||||
block,
|
||||
begin,
|
||||
end,
|
||||
block_size_log2,
|
||||
})
|
||||
}
|
||||
}
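
// Sketch (not in the original source): how `BlockIter` is typically used to
// split a byte range into per-block (here per-4KiB-page) chunks, e.g. when an
// access crosses page boundaries.
#[allow(dead_code)]
fn example_split_by_page(begin: usize, end: usize) {
    let iter = BlockIter { begin, end, block_size_log2: 12 };
    for range in iter {
        // `range.block` is the page index; `origin_begin()..origin_end()` is
        // the absolute byte range covered within that page.
        let _ = (range.block, range.origin_begin(), range.origin_end());
    }
}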
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn block_iter() {
|
||||
let mut iter = BlockIter {
|
||||
begin: 0x123,
|
||||
end: 0x2018,
|
||||
block_size_log2: 12,
|
||||
};
|
||||
assert_eq!(
|
||||
iter.next(),
|
||||
Some(BlockRange {
|
||||
block: 0,
|
||||
begin: 0x123,
|
||||
end: 0x1000,
|
||||
block_size_log2: 12
|
||||
})
|
||||
);
|
||||
assert_eq!(
|
||||
iter.next(),
|
||||
Some(BlockRange {
|
||||
block: 1,
|
||||
begin: 0,
|
||||
end: 0x1000,
|
||||
block_size_log2: 12
|
||||
})
|
||||
);
|
||||
assert_eq!(
|
||||
iter.next(),
|
||||
Some(BlockRange {
|
||||
block: 2,
|
||||
begin: 0,
|
||||
end: 0x18,
|
||||
block_size_log2: 12
|
||||
})
|
||||
);
|
||||
assert_eq!(iter.next(), None);
|
||||
}
|
||||
}
|
||||
@ -1,228 +0,0 @@
|
||||
//! ELF loading of Zircon and Linux.
|
||||
use crate::{error::*, vm::*};
|
||||
use alloc::sync::Arc;
|
||||
use kernel_hal::MMUFlags;
|
||||
use xmas_elf::{
|
||||
program::{Flags, ProgramHeader, SegmentData, Type},
|
||||
sections::SectionData,
|
||||
symbol_table::{DynEntry64, Entry},
|
||||
ElfFile,
|
||||
};
|
||||
|
||||
/// ELF-loading extension methods for `VmAddressRegion`.
|
||||
pub trait VmarExt {
|
||||
/// Create `VMObject` from all LOAD segments of `elf` and map them to this VMAR.
|
||||
/// Return the first `VMObject`.
|
||||
fn load_from_elf(&self, elf: &ElfFile) -> ZxResult<Arc<VmObject>>;
|
||||
/// Same as `load_from_elf`, but maps segments from an existing `vmo` instead of creating new ones.
|
||||
fn map_from_elf(&self, elf: &ElfFile, vmo: Arc<VmObject>) -> ZxResult;
|
||||
}
|
||||
|
||||
impl VmarExt for VmAddressRegion {
|
||||
fn load_from_elf(&self, elf: &ElfFile) -> ZxResult<Arc<VmObject>> {
|
||||
let mut first_vmo = None;
|
||||
for ph in elf.program_iter() {
|
||||
if ph.get_type().unwrap() != Type::Load {
|
||||
continue;
|
||||
}
|
||||
let vmo = make_vmo(elf, ph)?;
|
||||
let offset = ph.virtual_addr() as usize / PAGE_SIZE * PAGE_SIZE;
|
||||
let flags = ph.flags().to_mmu_flags();
|
||||
trace!("ph:{:#x?}, offset:{:#x?}, flags:{:#x?}", ph, offset, flags);
|
||||
// Map the VMO's physical memory into the VMAR
|
||||
self.map_at(offset, vmo.clone(), 0, vmo.len(), flags)?;
|
||||
first_vmo.get_or_insert(vmo);
|
||||
}
|
||||
Ok(first_vmo.unwrap())
|
||||
}
|
||||
fn map_from_elf(&self, elf: &ElfFile, vmo: Arc<VmObject>) -> ZxResult {
|
||||
for ph in elf.program_iter() {
|
||||
if ph.get_type().unwrap() != Type::Load {
|
||||
continue;
|
||||
}
|
||||
let offset = ph.virtual_addr() as usize;
|
||||
let flags = ph.flags().to_mmu_flags();
|
||||
let vmo_offset = pages(ph.physical_addr() as usize) * PAGE_SIZE;
|
||||
let len = pages(ph.mem_size() as usize) * PAGE_SIZE;
|
||||
self.map_at(offset, vmo.clone(), vmo_offset, len, flags)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
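
// Sketch (not in the original source): typical use of the extension trait
// above when loading a user program. Where the ELF image bytes come from
// (boot image, filesystem, ...) is outside the scope of this file and is an
// assumption here.
#[allow(dead_code)]
fn example_load(vmar: &VmAddressRegion, elf_data: &[u8]) -> ZxResult<Arc<VmObject>> {
    let elf = ElfFile::new(elf_data).map_err(|_| ZxError::INVALID_ARGS)?;
    // Creates one VMO per LOAD segment, maps them into `vmar`, and returns the
    // first VMO (conventionally the one holding the ELF header).
    vmar.load_from_elf(&elf)
}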
|
||||
|
||||
trait FlagsExt {
|
||||
fn to_mmu_flags(&self) -> MMUFlags;
|
||||
}
|
||||
|
||||
impl FlagsExt for Flags {
|
||||
fn to_mmu_flags(&self) -> MMUFlags {
|
||||
let mut flags = MMUFlags::USER;
|
||||
if self.is_read() {
|
||||
flags.insert(MMUFlags::READ);
|
||||
}
|
||||
if self.is_write() {
|
||||
flags.insert(MMUFlags::WRITE);
|
||||
}
|
||||
if self.is_execute() {
|
||||
flags.insert(MMUFlags::EXECUTE);
|
||||
}
|
||||
flags
|
||||
}
|
||||
}
|
||||
|
||||
fn make_vmo(elf: &ElfFile, ph: ProgramHeader) -> ZxResult<Arc<VmObject>> {
|
||||
assert_eq!(ph.get_type().unwrap(), Type::Load);
|
||||
let page_offset = ph.virtual_addr() as usize % PAGE_SIZE;
|
||||
// number of pages covering (virtual-address remainder + mem_size)
|
||||
let pages = pages(ph.mem_size() as usize + page_offset);
|
||||
trace!(
|
||||
"VmObject new pages: {:#x}, virtual_addr: {:#x}",
|
||||
pages,
|
||||
page_offset
|
||||
);
|
||||
let vmo = VmObject::new_paged(pages);
|
||||
let data = match ph.get_data(elf).unwrap() {
|
||||
SegmentData::Undefined(data) => data,
|
||||
_ => return Err(ZxError::INVALID_ARGS),
|
||||
};
|
||||
// Call VMObjectTrait::write: allocate physical memory, then write the program data into it
|
||||
vmo.write(page_offset, data)?;
|
||||
Ok(vmo)
|
||||
}
|
||||
|
||||
/// ELF-parsing extension methods for `ElfFile`.
|
||||
pub trait ElfExt {
|
||||
/// Get total size of all LOAD segments.
|
||||
fn load_segment_size(&self) -> usize;
|
||||
/// Get address of the given `symbol`.
|
||||
fn get_symbol_address(&self, symbol: &str) -> Option<u64>;
|
||||
/// Get the program interpreter path name.
|
||||
fn get_interpreter(&self) -> Result<&str, &str>;
|
||||
/// Get the virtual address of the ELF program header table (phdr).
|
||||
fn get_phdr_vaddr(&self) -> Option<u64>;
|
||||
/// Get the symbol table for dynamic linking (.dynsym section).
|
||||
fn dynsym(&self) -> Result<&[DynEntry64], &'static str>;
|
||||
/// Relocate according to the dynamic relocation section (.rel.dyn section).
|
||||
fn relocate(&self, base: usize) -> Result<(), &'static str>;
|
||||
}
|
||||
|
||||
impl ElfExt for ElfFile<'_> {
|
||||
fn load_segment_size(&self) -> usize {
|
||||
self.program_iter()
|
||||
.filter(|ph| ph.get_type().unwrap() == Type::Load)
|
||||
.map(|ph| pages((ph.virtual_addr() + ph.mem_size()) as usize))
|
||||
.max()
|
||||
.unwrap_or(0)
|
||||
* PAGE_SIZE
|
||||
}
|
||||
|
||||
fn get_symbol_address(&self, symbol: &str) -> Option<u64> {
|
||||
for section in self.section_iter() {
|
||||
if let SectionData::SymbolTable64(entries) = section.get_data(self).unwrap() {
|
||||
for e in entries {
|
||||
if e.get_name(self).unwrap() == symbol {
|
||||
return Some(e.value());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
fn get_interpreter(&self) -> Result<&str, &str> {
|
||||
let header = self
|
||||
.program_iter()
|
||||
.find(|ph| ph.get_type() == Ok(Type::Interp))
|
||||
.ok_or("no interp header")?;
|
||||
let data = match header.get_data(self)? {
|
||||
SegmentData::Undefined(data) => data,
|
||||
_ => return Err("bad interp"),
|
||||
};
|
||||
let len = (0..).find(|&i| data[i] == 0).unwrap();
|
||||
let path = core::str::from_utf8(&data[..len]).map_err(|_| "failed to convert to utf8")?;
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
/*
|
||||
* [ ERROR ] page fault from user mode 0x40 READ
|
||||
*/
|
||||
|
||||
fn get_phdr_vaddr(&self) -> Option<u64> {
|
||||
if let Some(phdr) = self
|
||||
.program_iter()
|
||||
.find(|ph| ph.get_type() == Ok(Type::Phdr))
|
||||
{
|
||||
// if phdr exists in program header, use it
|
||||
Some(phdr.virtual_addr())
|
||||
} else if let Some(elf_addr) = self
|
||||
.program_iter()
|
||||
.find(|ph| ph.get_type() == Ok(Type::Load) && ph.offset() == 0)
|
||||
{
|
||||
// otherwise, check if elf is loaded from the beginning, then phdr can be inferred.
|
||||
Some(elf_addr.virtual_addr() + self.header.pt2.ph_offset())
|
||||
} else {
|
||||
warn!("elf: no phdr found, tls might not work");
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn dynsym(&self) -> Result<&[DynEntry64], &'static str> {
|
||||
match self
|
||||
.find_section_by_name(".dynsym")
|
||||
.ok_or(".dynsym not found")?
|
||||
.get_data(self)
|
||||
.map_err(|_| "corrupted .dynsym")?
|
||||
{
|
||||
SectionData::DynSymbolTable64(dsym) => Ok(dsym),
|
||||
_ => Err("bad .dynsym"),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unsafe_code)]
|
||||
fn relocate(&self, base: usize) -> Result<(), &'static str> {
|
||||
let data = self
|
||||
.find_section_by_name(".rela.dyn")
|
||||
.ok_or(".rela.dyn not found")?
|
||||
.get_data(self)
|
||||
.map_err(|_| "corrupted .rela.dyn")?;
|
||||
let entries = match data {
|
||||
SectionData::Rela64(entries) => entries,
|
||||
_ => return Err("bad .rela.dyn"),
|
||||
};
|
||||
let dynsym = self.dynsym()?;
|
||||
for entry in entries {
|
||||
const REL_GOT: u32 = 6;
|
||||
const REL_PLT: u32 = 7;
|
||||
const REL_RELATIVE: u32 = 8;
|
||||
const R_RISCV_64: u32 = 2;
|
||||
const R_RISCV_RELATIVE: u32 = 3;
|
||||
match entry.get_type() {
|
||||
REL_GOT | REL_PLT | R_RISCV_64 => {
|
||||
let dynsym = &dynsym[entry.get_symbol_table_index() as usize];
|
||||
let symval = if dynsym.shndx() == 0 {
|
||||
let name = dynsym.get_name(self)?;
|
||||
panic!("need to find symbol: {:?}", name);
|
||||
} else {
|
||||
base + dynsym.value() as usize
|
||||
};
|
||||
let value = symval + entry.get_addend() as usize;
|
||||
unsafe {
|
||||
let ptr = (base + entry.get_offset() as usize) as *mut usize;
|
||||
debug!("GOT write: {:#x} @ {:#x}", value, ptr as usize);
|
||||
ptr.write(value);
|
||||
}
|
||||
}
|
||||
REL_RELATIVE | R_RISCV_RELATIVE => {
|
||||
let value = base + entry.get_addend() as usize;
|
||||
unsafe {
|
||||
let ptr = (base + entry.get_offset() as usize) as *mut usize;
|
||||
debug!("RELATIVE write: {:#x} @ {:#x}", value, ptr as usize);
|
||||
ptr.write(value);
|
||||
}
|
||||
}
|
||||
t => unimplemented!("unknown type: {}", t),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -1,4 +0,0 @@
|
||||
//! Utilities.
|
||||
|
||||
pub(crate) mod block_range;
|
||||
pub mod elf_loader;
|
||||
@ -1,52 +0,0 @@
|
||||
//! Objects for Virtual Memory Management.
|
||||
|
||||
mod vmar;
|
||||
mod vmo;
|
||||
|
||||
pub use self::{vmar::*, vmo::*};
|
||||
|
||||
/// Physical Address
|
||||
pub type PhysAddr = usize;
|
||||
|
||||
/// Virtual Address
|
||||
pub type VirtAddr = usize;
|
||||
|
||||
/// Device Address
|
||||
pub type DevVAddr = usize;
|
||||
|
||||
/// Size of a page
|
||||
pub const PAGE_SIZE: usize = 0x1000;
|
||||
|
||||
/// log2(PAGE_SIZE)
|
||||
pub const PAGE_SIZE_LOG2: usize = 12;
|
||||
|
||||
/// Check whether `x` is a multiple of `PAGE_SIZE`.
|
||||
pub fn page_aligned(x: usize) -> bool {
|
||||
check_aligned(x, PAGE_SIZE)
|
||||
}
|
||||
|
||||
/// Check whether `x` is a multiple of `align`.
|
||||
pub fn check_aligned(x: usize, align: usize) -> bool {
|
||||
x % align == 0
|
||||
}
|
||||
|
||||
/// How many pages the `size` needs.
|
||||
/// Uses a wrapping add to avoid overflow (and to pass more unit tests).
|
||||
pub fn pages(size: usize) -> usize {
|
||||
ceil(size, PAGE_SIZE)
|
||||
}
|
||||
|
||||
/// How many units of `align` are needed to cover `x` (ceiling division).
|
||||
pub fn ceil(x: usize, align: usize) -> usize {
|
||||
x.wrapping_add(align - 1) / align
|
||||
}
|
||||
|
||||
/// Round up `size` to a multiple of `PAGE_SIZE`.
|
||||
pub fn roundup_pages(size: usize) -> usize {
|
||||
pages(size) * PAGE_SIZE
|
||||
}
|
||||
|
||||
/// Round down `size` to a multiple of `PAGE_SIZE`.
|
||||
pub fn round_down_pages(size: usize) -> usize {
|
||||
size / PAGE_SIZE * PAGE_SIZE
|
||||
}
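
// Worked example (not in the original source) of the page-size helpers above,
// assuming the 4 KiB `PAGE_SIZE` defined in this module.
#[allow(dead_code)]
fn example_page_math() {
    assert_eq!(pages(0x1001), 2); // 0x1001 bytes need two 4 KiB pages
    assert_eq!(roundup_pages(0x1001), 0x2000);
    assert_eq!(round_down_pages(0x1fff), 0x1000);
    assert!(page_aligned(0x3000));
}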
|
||||
@ -1,848 +0,0 @@
|
||||
use {
|
||||
super::*,
|
||||
crate::object::*,
|
||||
alloc::sync::Arc,
|
||||
alloc::vec,
|
||||
alloc::vec::Vec,
|
||||
bitflags::bitflags,
|
||||
kernel_hal::{MMUFlags, PageTableTrait},
|
||||
spin::Mutex,
|
||||
};
|
||||
|
||||
bitflags! {
|
||||
/// Creation flags for VmAddressRegion.
|
||||
pub struct VmarFlags: u32 {
|
||||
#[allow(clippy::identity_op)]
|
||||
/// When randomly allocating subregions, reduce sprawl by placing allocations
|
||||
/// near each other.
|
||||
const COMPACT = 1 << 0;
|
||||
/// Request that the new region be at the specified offset in its parent region.
|
||||
const SPECIFIC = 1 << 1;
|
||||
/// Like SPECIFIC, but permits overwriting existing mappings. This
|
||||
/// flag will not overwrite through a subregion.
|
||||
const SPECIFIC_OVERWRITE = 1 << 2;
|
||||
/// Allow VmMappings to be created inside the new region with the SPECIFIC or
|
||||
/// OFFSET_IS_UPPER_LIMIT flag.
|
||||
const CAN_MAP_SPECIFIC = 1 << 3;
|
||||
/// Allow VmMappings to be created inside the region with read permissions.
|
||||
const CAN_MAP_READ = 1 << 4;
|
||||
/// Allow VmMappings to be created inside the region with write permissions.
|
||||
const CAN_MAP_WRITE = 1 << 5;
|
||||
/// Allow VmMappings to be created inside the region with execute permissions.
|
||||
const CAN_MAP_EXECUTE = 1 << 6;
|
||||
/// Require that VMO backing the mapping is non-resizable.
|
||||
const REQUIRE_NON_RESIZABLE = 1 << 7;
|
||||
/// Allow the mapping to generate faults (e.g. for pager-backed or resizable VMOs).
|
||||
const ALLOW_FAULTS = 1 << 8;
|
||||
|
||||
/// Allow VmMappings to be created inside the region with read, write and execute permissions.
|
||||
const CAN_MAP_RXW = Self::CAN_MAP_READ.bits | Self::CAN_MAP_EXECUTE.bits | Self::CAN_MAP_WRITE.bits;
|
||||
/// Creation flags for root VmAddressRegion
|
||||
const ROOT_FLAGS = Self::CAN_MAP_RXW.bits | Self::CAN_MAP_SPECIFIC.bits;
|
||||
}
|
||||
}
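
// Sketch (not in the original source): composing the flags above for a child
// region that may hold read/write mappings at caller-chosen offsets.
#[allow(dead_code)]
fn example_data_region_flags() -> VmarFlags {
    let flags = VmarFlags::CAN_MAP_READ | VmarFlags::CAN_MAP_WRITE | VmarFlags::CAN_MAP_SPECIFIC;
    // ROOT_FLAGS covers every capability requested here.
    debug_assert!(VmarFlags::ROOT_FLAGS.contains(flags));
    flags
}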
|
||||
|
||||
/// Virtual Memory Address Regions
|
||||
pub struct VmAddressRegion {
|
||||
flags: VmarFlags,
|
||||
base: KObjectBase,
|
||||
addr: VirtAddr,
|
||||
size: usize,
|
||||
parent: Option<Arc<VmAddressRegion>>,
|
||||
page_table: Arc<Mutex<dyn PageTableTrait>>,
|
||||
/// If `inner` is `None`, this region has been destroyed and all operations are invalid.
|
||||
inner: Mutex<Option<VmarInner>>,
|
||||
}
|
||||
|
||||
impl_kobject!(VmAddressRegion);
|
||||
|
||||
/// The mutable part of `VmAddressRegion`.
|
||||
#[derive(Default)]
|
||||
struct VmarInner {
|
||||
children: Vec<Arc<VmAddressRegion>>,
|
||||
mappings: Vec<Arc<VmMapping>>,
|
||||
}
|
||||
|
||||
impl VmAddressRegion {
|
||||
/// Create a new root VMAR.
|
||||
pub fn new_root() -> Arc<Self> {
|
||||
let (addr, size) = {
|
||||
use core::sync::atomic::*;
|
||||
static VMAR_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
let i = VMAR_ID.fetch_add(1, Ordering::SeqCst);
|
||||
(0x2_0000_0000 + 0x100_0000_0000 * i, 0x100_0000_0000)
|
||||
};
|
||||
Arc::new(VmAddressRegion {
|
||||
flags: VmarFlags::ROOT_FLAGS,
|
||||
base: KObjectBase::new(),
|
||||
addr,
|
||||
size,
|
||||
parent: None,
|
||||
page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())), //hal PageTable
|
||||
inner: Mutex::new(Some(VmarInner::default())),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a kernel root VMAR.
|
||||
pub fn new_kernel() -> Arc<Self> {
|
||||
let kernel_vmar_base = KERNEL_ASPACE_BASE as usize;
|
||||
let kernel_vmar_size = KERNEL_ASPACE_SIZE as usize;
|
||||
Arc::new(VmAddressRegion {
|
||||
flags: VmarFlags::ROOT_FLAGS,
|
||||
base: KObjectBase::new(),
|
||||
addr: kernel_vmar_base,
|
||||
size: kernel_vmar_size,
|
||||
parent: None,
|
||||
page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())),
|
||||
inner: Mutex::new(Some(VmarInner::default())),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a child VMAR at the `offset`.
|
||||
pub fn allocate_at(
|
||||
self: &Arc<Self>,
|
||||
offset: usize,
|
||||
len: usize,
|
||||
flags: VmarFlags,
|
||||
align: usize,
|
||||
) -> ZxResult<Arc<Self>> {
|
||||
self.allocate(Some(offset), len, flags, align)
|
||||
}
|
||||
|
||||
/// Create a child VMAR with optional `offset`.
|
||||
pub fn allocate(
|
||||
self: &Arc<Self>,
|
||||
offset: Option<usize>,
|
||||
len: usize,
|
||||
flags: VmarFlags,
|
||||
align: usize,
|
||||
) -> ZxResult<Arc<Self>> {
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
let offset = self.determine_offset(inner, offset, len, align)?;
|
||||
let child = Arc::new(VmAddressRegion {
|
||||
flags,
|
||||
base: KObjectBase::new(),
|
||||
addr: self.addr + offset,
|
||||
size: len,
|
||||
parent: Some(self.clone()),
|
||||
page_table: self.page_table.clone(),
|
||||
inner: Mutex::new(Some(VmarInner::default())),
|
||||
});
|
||||
inner.children.push(child.clone());
|
||||
Ok(child)
|
||||
}
|
||||
|
||||
/// Map the `vmo` into this VMAR at given `offset`.
|
||||
pub fn map_at(
|
||||
&self,
|
||||
vmar_offset: usize,
|
||||
vmo: Arc<VmObject>,
|
||||
vmo_offset: usize,
|
||||
len: usize,
|
||||
flags: MMUFlags,
|
||||
) -> ZxResult<VirtAddr> {
|
||||
self.map(Some(vmar_offset), vmo, vmo_offset, len, flags)
|
||||
}
|
||||
|
||||
/// Map the `vmo` into this VMAR.
|
||||
pub fn map(
|
||||
&self,
|
||||
vmar_offset: Option<usize>,
|
||||
vmo: Arc<VmObject>,
|
||||
vmo_offset: usize,
|
||||
len: usize,
|
||||
flags: MMUFlags,
|
||||
) -> ZxResult<VirtAddr> {
|
||||
self.map_ext(
|
||||
vmar_offset,
|
||||
vmo,
|
||||
vmo_offset,
|
||||
len,
|
||||
MMUFlags::RXW,
|
||||
flags,
|
||||
false,
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
/// Map the `vmo` into this VMAR.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn map_ext(
|
||||
&self,
|
||||
vmar_offset: Option<usize>,
|
||||
vmo: Arc<VmObject>,
|
||||
vmo_offset: usize,
|
||||
len: usize,
|
||||
permissions: MMUFlags,
|
||||
flags: MMUFlags,
|
||||
overwrite: bool,
|
||||
map_range: bool,
|
||||
) -> ZxResult<VirtAddr> {
|
||||
if !page_aligned(vmo_offset) || !page_aligned(len) || vmo_offset.overflowing_add(len).1 {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
if !permissions.contains(flags & MMUFlags::RXW) {
|
||||
return Err(ZxError::ACCESS_DENIED);
|
||||
}
|
||||
if vmo_offset > vmo.len() || len > vmo.len() - vmo_offset {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
// Simplify: overwrite == false && map_range == true
|
||||
if overwrite || !map_range {
|
||||
warn!("Simplify: overwrite == false && map_range == true");
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
let offset = self.determine_offset(inner, vmar_offset, len, PAGE_SIZE)?;
|
||||
let addr = self.addr + offset;
|
||||
let flags = flags | MMUFlags::from_bits_truncate(vmo.cache_policy() as u32 as usize);
|
||||
// align = 1K? 2K? 4K? 8K? ...
|
||||
if !self.test_map(inner, offset, len, PAGE_SIZE) {
|
||||
return Err(ZxError::NO_MEMORY);
|
||||
}
|
||||
let mapping = VmMapping::new(
|
||||
addr,
|
||||
len,
|
||||
vmo,
|
||||
vmo_offset,
|
||||
permissions,
|
||||
flags,
|
||||
self.page_table.clone(),
|
||||
);
|
||||
mapping.map()?;
|
||||
inner.mappings.push(mapping);
|
||||
Ok(addr)
|
||||
}
|
||||
|
||||
/// Unmaps all VMO mappings and destroys all sub-regions within the absolute range
|
||||
/// starting at `addr` (inclusive) and ending at `addr + len` (exclusive).
|
||||
/// Any sub-region that is in the range must be fully in the range
|
||||
/// (i.e. partial overlaps are an error).
|
||||
/// NOT SUPPORTED:
|
||||
/// If a mapping is only partially in the range, the mapping is split and the requested
|
||||
/// portion is unmapped.
|
||||
pub fn unmap(&self, addr: VirtAddr, len: usize) -> ZxResult {
|
||||
if !page_aligned(addr) || !page_aligned(len) || len == 0 {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
let begin = addr;
|
||||
let end = addr + len;
|
||||
// check partial overlapped sub-regions
|
||||
if inner
|
||||
.children
|
||||
.iter()
|
||||
.any(|vmar| vmar.partial_overlap(begin, end))
|
||||
{
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
|
||||
if inner
|
||||
.mappings
|
||||
.iter()
|
||||
.any(|map| map.partial_overlap(begin, end))
|
||||
{
|
||||
warn!("Simplify: Not support partial unmap.");
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
inner.mappings.drain_filter(|map| map.within(begin, end));
|
||||
|
||||
for vmar in inner.children.drain_filter(|vmar| vmar.within(begin, end)) {
|
||||
vmar.destroy_internal()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Change protections on a subset of the region of memory in the containing
|
||||
/// address space. If the requested range overlaps with a subregion,
|
||||
/// protect() will fail.
|
||||
pub fn protect(&self, addr: usize, len: usize, flags: MMUFlags) -> ZxResult {
|
||||
if !page_aligned(addr) || !page_aligned(len) {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
let end_addr = addr + len;
|
||||
// check if there are overlapping subregion
|
||||
if inner
|
||||
.children
|
||||
.iter()
|
||||
.any(|child| child.overlap(addr, end_addr))
|
||||
{
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
let length = inner.mappings.iter().fold(0, |acc, map| {
|
||||
acc + end_addr
|
||||
.min(map.end_addr())
|
||||
.saturating_sub(addr.max(map.addr()))
|
||||
});
|
||||
if length != len {
|
||||
return Err(ZxError::NOT_FOUND);
|
||||
}
|
||||
// check if protect flags is valid
|
||||
if inner
|
||||
.mappings
|
||||
.iter()
|
||||
.filter(|map| map.overlap(addr, end_addr)) // get mappings in range: [addr, end_addr]
|
||||
.any(|map| !map.is_valid_mapping_flags(flags))
|
||||
{
|
||||
return Err(ZxError::ACCESS_DENIED);
|
||||
}
|
||||
inner
|
||||
.mappings
|
||||
.iter()
|
||||
.filter(|map| map.overlap(addr, end_addr))
|
||||
.for_each(|map| {
|
||||
let start_index = pages(addr.max(map.addr()) - map.addr());
|
||||
let end_index = pages(end_addr.min(map.end_addr()) - map.addr());
|
||||
map.protect(flags, start_index, end_index);
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unmap all mappings and destroy all sub-regions of VMAR.
|
||||
pub fn clear(&self) -> ZxResult {
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
for vmar in inner.children.drain(..) {
|
||||
vmar.destroy_internal()?;
|
||||
}
|
||||
inner.mappings.clear();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Destroy but do not remove self from parent.
|
||||
fn destroy_internal(&self) -> ZxResult {
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
for vmar in inner.children.drain(..) {
|
||||
vmar.destroy_internal()?;
|
||||
}
|
||||
inner.mappings.clear();
|
||||
*guard = None;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Unmap all mappings within the VMAR, and destroy all sub-regions of the region.
|
||||
pub fn destroy(self: &Arc<Self>) -> ZxResult {
|
||||
self.destroy_internal()?;
|
||||
// remove from parent
|
||||
if let Some(parent) = &self.parent {
|
||||
let mut guard = parent.inner.lock();
|
||||
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
|
||||
inner.children.retain(|vmar| !Arc::ptr_eq(self, vmar));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get physical address of the underlying page table.
|
||||
pub fn table_phys(&self) -> PhysAddr {
|
||||
self.page_table.lock().table_phys()
|
||||
}
|
||||
|
||||
/// Get start address of this VMAR.
|
||||
pub fn addr(&self) -> usize {
|
||||
self.addr
|
||||
}
|
||||
|
||||
/// Whether this VMAR is dead.
|
||||
pub fn is_dead(&self) -> bool {
|
||||
self.inner.lock().is_none()
|
||||
}
|
||||
|
||||
/// Whether this VMAR is alive.
|
||||
pub fn is_alive(&self) -> bool {
|
||||
!self.is_dead()
|
||||
}
|
||||
|
||||
/// Determine the offset within this VMAR for a new allocation of `len` bytes, given an optional requested `offset`.
|
||||
fn determine_offset(
|
||||
&self,
|
||||
inner: &VmarInner,
|
||||
offset: Option<usize>,
|
||||
len: usize,
|
||||
align: usize,
|
||||
) -> ZxResult<VirtAddr> {
|
||||
if !check_aligned(len, align) {
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
} else if let Some(offset) = offset {
|
||||
if check_aligned(offset, align) && self.test_map(inner, offset, len, align) {
|
||||
Ok(offset)
|
||||
} else {
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
}
|
||||
} else if len > self.size {
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
} else {
|
||||
match self.find_free_area(inner, 0, len, align) {
|
||||
Some(offset) => Ok(offset),
|
||||
None => Err(ZxError::NO_MEMORY),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Test whether a new mapping of `len` bytes can be created at `offset`.
|
||||
fn test_map(&self, inner: &VmarInner, offset: usize, len: usize, align: usize) -> bool {
|
||||
debug_assert!(check_aligned(offset, align));
|
||||
debug_assert!(check_aligned(len, align));
|
||||
let begin = self.addr + offset;
|
||||
let end = begin + len;
|
||||
if end > self.addr + self.size {
|
||||
return false;
|
||||
}
|
||||
// brute force
|
||||
if inner.children.iter().any(|vmar| vmar.overlap(begin, end)) {
|
||||
return false;
|
||||
}
|
||||
if inner.mappings.iter().any(|map| map.overlap(begin, end)) {
|
||||
return false;
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
/// Find a free area of `len` bytes.
|
||||
fn find_free_area(
|
||||
&self,
|
||||
inner: &VmarInner,
|
||||
offset_hint: usize,
|
||||
len: usize,
|
||||
align: usize,
|
||||
) -> Option<usize> {
|
||||
// TODO: randomize
|
||||
debug_assert!(check_aligned(offset_hint, align));
|
||||
debug_assert!(check_aligned(len, align));
|
||||
// brute force:
|
||||
// try each area's end address as the start
|
||||
core::iter::once(offset_hint)
|
||||
.chain(inner.children.iter().map(|map| map.end_addr() - self.addr))
|
||||
.chain(inner.mappings.iter().map(|map| map.end_addr() - self.addr))
|
||||
.find(|&offset| self.test_map(inner, offset, len, align))
|
||||
}
|
||||
|
||||
fn end_addr(&self) -> VirtAddr {
|
||||
self.addr + self.size
|
||||
}
|
||||
|
||||
fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
!(self.addr >= end || self.end_addr() <= begin)
|
||||
}
|
||||
|
||||
fn within(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
begin <= self.addr && self.end_addr() <= end
|
||||
}
|
||||
|
||||
fn partial_overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
self.overlap(begin, end) && !self.within(begin, end)
|
||||
}
|
||||
|
||||
fn contains(&self, vaddr: VirtAddr) -> bool {
|
||||
self.addr <= vaddr && vaddr < self.end_addr()
|
||||
}
|
||||
|
||||
/// Get information of this VmAddressRegion
|
||||
pub fn get_info(&self) -> VmarInfo {
|
||||
// pub fn get_info(&self, va: usize) -> VmarInfo {
|
||||
// let _r = self.page_table.lock().query(va);
|
||||
VmarInfo {
|
||||
base: self.addr(),
|
||||
len: self.size,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get VmarFlags of this VMAR.
|
||||
pub fn get_flags(&self) -> VmarFlags {
|
||||
self.flags
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn count(&self) -> usize {
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().unwrap();
|
||||
println!("m = {}, c = {}", inner.mappings.len(), inner.children.len());
|
||||
inner.mappings.len() + inner.children.len()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn used_size(&self) -> usize {
|
||||
let mut guard = self.inner.lock();
|
||||
let inner = guard.as_mut().unwrap();
|
||||
let map_size: usize = inner.mappings.iter().map(|map| map.size()).sum();
|
||||
let vmar_size: usize = inner.children.iter().map(|vmar| vmar.size).sum();
|
||||
println!("size = {:#x?}", map_size + vmar_size);
|
||||
map_size + vmar_size
|
||||
}
|
||||
}
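
// Sketch (not in the original source): end-to-end use of the public VMAR API
// above, mirroring what the unit tests below exercise. Sizes are arbitrary.
#[allow(dead_code)]
fn example_vmar_usage() -> ZxResult {
    let root = VmAddressRegion::new_root();
    // Carve out a 16-page child region at offset 0 of the root.
    let child = root.allocate_at(0, 0x10_000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)?;
    // Map a 4-page VMO read/write at offset 0 of the child.
    let vmo = VmObject::new_paged(4);
    let addr = child.map_at(0, vmo, 0, 0x4000, MMUFlags::READ | MMUFlags::WRITE)?;
    // Unmap it again and tear the child down.
    child.unmap(addr, 0x4000)?;
    child.destroy()
}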
|
||||
|
||||
/// Information of a VmAddressRegion.
|
||||
#[repr(C)]
|
||||
#[derive(Debug)]
|
||||
pub struct VmarInfo {
|
||||
base: usize,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
/// Virtual Memory Mapping
|
||||
pub struct VmMapping {
|
||||
/// The permission limits imposed by the containing VMAR
|
||||
permissions: MMUFlags,
|
||||
vmo: Arc<VmObject>,
|
||||
page_table: Arc<Mutex<dyn PageTableTrait>>,
|
||||
inner: Mutex<VmMappingInner>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct VmMappingInner {
|
||||
/// The actual flags used in the mapping of each page
|
||||
flags: Vec<MMUFlags>,
|
||||
addr: VirtAddr,
|
||||
size: usize,
|
||||
vmo_offset: usize,
|
||||
}
|
||||
|
||||
impl core::fmt::Debug for VmMapping {
|
||||
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
|
||||
let inner = self.inner.lock();
|
||||
f.debug_struct("VmMapping")
|
||||
.field("addr", &inner.addr)
|
||||
.field("size", &inner.size)
|
||||
.field("permissions", &self.permissions)
|
||||
.field("flags", &inner.flags)
|
||||
.field("vmo_id", &self.vmo.id())
|
||||
.field("vmo_offset", &inner.vmo_offset)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl VmMapping {
|
||||
fn new(
|
||||
addr: VirtAddr,
|
||||
size: usize,
|
||||
vmo: Arc<VmObject>,
|
||||
vmo_offset: usize,
|
||||
permissions: MMUFlags,
|
||||
flags: MMUFlags,
|
||||
page_table: Arc<Mutex<dyn PageTableTrait>>,
|
||||
) -> Arc<Self> {
|
||||
let mapping = Arc::new(VmMapping {
|
||||
inner: Mutex::new(VmMappingInner {
|
||||
flags: vec![flags; pages(size)],
|
||||
addr,
|
||||
size,
|
||||
vmo_offset,
|
||||
}),
|
||||
permissions,
|
||||
page_table,
|
||||
vmo: vmo.clone(),
|
||||
});
|
||||
vmo.append_mapping(Arc::downgrade(&mapping));
|
||||
mapping
|
||||
}
|
||||
|
||||
/// Map range and commit.
|
||||
/// Commit pages to vmo, and map those to frames in page_table.
|
||||
/// Temporarily used for development. A standard procedure for
|
||||
/// vmo is: create_vmo, op_range(commit), map
|
||||
fn map(self: &Arc<Self>) -> ZxResult {
|
||||
self.vmo.commit_pages_with(&mut |commit| {
|
||||
let inner = self.inner.lock();
|
||||
let mut page_table = self.page_table.lock();
|
||||
let page_num = inner.size / PAGE_SIZE;
|
||||
let vmo_offset = inner.vmo_offset / PAGE_SIZE;
|
||||
for i in 0..page_num {
|
||||
let paddr = commit(vmo_offset + i, inner.flags[i])?;
|
||||
//通过 PageTableTrait 的 hal_pt_map 进行页表映射
|
||||
page_table
|
||||
.map(inner.addr + i * PAGE_SIZE, paddr, inner.flags[i])
|
||||
.expect("failed to map");
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
|
||||
fn unmap(&self) {
|
||||
let inner = self.inner.lock();
|
||||
let pages = inner.size / PAGE_SIZE;
|
||||
// TODO inner.vmo_offset unused?
|
||||
self.page_table
|
||||
.lock()
|
||||
.unmap_cont(inner.addr, pages)
|
||||
.expect("failed to unmap")
|
||||
}
|
||||
|
||||
fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
let inner = self.inner.lock();
|
||||
!(inner.addr >= end || inner.end_addr() <= begin)
|
||||
}
|
||||
|
||||
fn within(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
let inner = self.inner.lock();
|
||||
begin <= inner.addr && inner.end_addr() <= end
|
||||
}
|
||||
|
||||
fn partial_overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
|
||||
self.overlap(begin, end) && !self.within(begin, end)
|
||||
}
|
||||
|
||||
fn contains(&self, vaddr: VirtAddr) -> bool {
|
||||
let inner = self.inner.lock();
|
||||
inner.addr <= vaddr && vaddr < inner.end_addr()
|
||||
}
|
||||
|
||||
fn is_valid_mapping_flags(&self, flags: MMUFlags) -> bool {
|
||||
self.permissions.contains(flags & MMUFlags::RXW)
|
||||
}
|
||||
|
||||
fn protect(&self, flags: MMUFlags, start_index: usize, end_index: usize) {
|
||||
let mut inner = self.inner.lock();
|
||||
let mut pg_table = self.page_table.lock();
|
||||
for i in start_index..end_index {
|
||||
inner.flags[i] = (inner.flags[i] & !MMUFlags::RXW) | (flags & MMUFlags::RXW);
|
||||
pg_table
|
||||
.protect(inner.addr + i * PAGE_SIZE, inner.flags[i])
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
fn size(&self) -> usize {
|
||||
self.inner.lock().size
|
||||
}
|
||||
|
||||
fn addr(&self) -> VirtAddr {
|
||||
self.inner.lock().addr
|
||||
}
|
||||
|
||||
fn end_addr(&self) -> VirtAddr {
|
||||
self.inner.lock().end_addr()
|
||||
}
|
||||
|
||||
/// Get MMUFlags of this VmMapping.
|
||||
pub fn get_flags(&self, vaddr: usize) -> ZxResult<MMUFlags> {
|
||||
if self.contains(vaddr) {
|
||||
let page_id = (vaddr - self.addr()) / PAGE_SIZE;
|
||||
Ok(self.inner.lock().flags[page_id])
|
||||
} else {
|
||||
Err(ZxError::NO_MEMORY)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl VmMappingInner {
|
||||
fn end_addr(&self) -> VirtAddr {
|
||||
self.addr + self.size
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for VmMapping {
|
||||
fn drop(&mut self) {
|
||||
self.unmap();
|
||||
}
|
||||
}
|
||||
|
||||
/// The base of kernel address space
|
||||
/// On x86 Fuchsia this is 0xffff_ff80_0000_0000 instead
|
||||
pub const KERNEL_ASPACE_BASE: u64 = 0xffff_ff02_0000_0000;
|
||||
/// The size of kernel address space
|
||||
pub const KERNEL_ASPACE_SIZE: u64 = 0x0000_0080_0000_0000;
|
||||
/// The base of user address space
|
||||
pub const USER_ASPACE_BASE: u64 = 0;
|
||||
// pub const USER_ASPACE_BASE: u64 = 0x0000_0000_0100_0000;
|
||||
/// The size of user address space
|
||||
pub const USER_ASPACE_SIZE: u64 = (1u64 << 47) - 4096 - USER_ASPACE_BASE;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn create_child() {
|
||||
let root_vmar = VmAddressRegion::new_root();
|
||||
let child = root_vmar
|
||||
.allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.expect("failed to create child VMAR");
|
||||
|
||||
// test invalid argument
|
||||
assert_eq!(
|
||||
root_vmar
|
||||
.allocate_at(0x2001, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.err(),
|
||||
Some(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
root_vmar
|
||||
.allocate_at(0x2000, 1, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.err(),
|
||||
Some(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
root_vmar
|
||||
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.err(),
|
||||
Some(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
child
|
||||
.allocate_at(0x1000, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.err(),
|
||||
Some(ZxError::INVALID_ARGS)
|
||||
);
|
||||
}
|
||||
|
||||
/// A magic value used by the mapping test to check that two mappings alias the same pages.
|
||||
const MAGIC: usize = 0xdead_beaf;
|
||||
|
||||
#[test]
|
||||
#[allow(unsafe_code)]
|
||||
fn map() {
|
||||
let vmar = VmAddressRegion::new_root();
|
||||
let vmo = VmObject::new_paged(4);
|
||||
let flags = MMUFlags::READ | MMUFlags::WRITE;
|
||||
|
||||
// invalid argument
|
||||
assert_eq!(
|
||||
vmar.map_at(0, vmo.clone(), 0x4000, 0x1000, flags),
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
vmar.map_at(0, vmo.clone(), 0, 0x5000, flags),
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
vmar.map_at(0, vmo.clone(), 0x1000, 1, flags),
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
);
|
||||
assert_eq!(
|
||||
vmar.map_at(0, vmo.clone(), 1, 0x1000, flags),
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
);
|
||||
vmar.map_at(0, vmo.clone(), 0, 0x4000, flags).unwrap();
|
||||
vmar.map_at(0x12000, vmo.clone(), 0x2000, 0x1000, flags)
|
||||
.unwrap();
|
||||
unsafe {
|
||||
((vmar.addr() + 0x2000) as *mut usize).write(MAGIC);
|
||||
assert_eq!(((vmar.addr() + 0x12000) as *const usize).read(), MAGIC);
|
||||
}
|
||||
}
|
||||
|
||||
/// ```text
|
||||
/// +--------+--------+--------+--------+
|
||||
/// | root .... |
|
||||
/// +--------+--------+--------+--------+
|
||||
/// | child1 | child2 |
|
||||
/// +--------+--------+--------+
|
||||
/// | g-son1 | g-son2 |
|
||||
/// +--------+--------+
|
||||
/// ```
|
||||
struct Sample {
|
||||
root: Arc<VmAddressRegion>,
|
||||
child1: Arc<VmAddressRegion>,
|
||||
child2: Arc<VmAddressRegion>,
|
||||
grandson1: Arc<VmAddressRegion>,
|
||||
grandson2: Arc<VmAddressRegion>,
|
||||
}
|
||||
|
||||
impl Sample {
|
||||
fn new() -> Self {
|
||||
let root = VmAddressRegion::new_root();
|
||||
let child1 = root
|
||||
.allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.unwrap();
|
||||
let child2 = root
|
||||
.allocate_at(0x2000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.unwrap();
|
||||
let grandson1 = child1
|
||||
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.unwrap();
|
||||
let grandson2 = child1
|
||||
.allocate_at(0x1000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.unwrap();
|
||||
Sample {
|
||||
root,
|
||||
child1,
|
||||
child2,
|
||||
grandson1,
|
||||
grandson2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unmap_vmar() {
|
||||
let s = Sample::new();
|
||||
let base = s.root.addr();
|
||||
s.child1.unmap(base, 0x1000).unwrap();
|
||||
assert!(s.grandson1.is_dead());
|
||||
assert!(s.grandson2.is_alive());
|
||||
|
||||
// partially overlapping a sub-region should fail.
|
||||
let s = Sample::new();
|
||||
let base = s.root.addr();
|
||||
assert_eq!(
|
||||
s.root.unmap(base + 0x1000, 0x2000),
|
||||
Err(ZxError::INVALID_ARGS)
|
||||
);
|
||||
|
||||
// unmapping nothing should succeed.
|
||||
let s = Sample::new();
|
||||
let base = s.root.addr();
|
||||
s.child1.unmap(base + 0x8000, 0x1000).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn destroy() {
|
||||
let s = Sample::new();
|
||||
s.child1.destroy().unwrap();
|
||||
assert!(s.child1.is_dead());
|
||||
assert!(s.grandson1.is_dead());
|
||||
assert!(s.grandson2.is_dead());
|
||||
assert!(s.child2.is_alive());
|
||||
// address space should be released
|
||||
assert!(s
|
||||
.root
|
||||
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
|
||||
.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unmap_mapping() {
|
||||
// +--------+--------+--------+--------+--------+
|
||||
// 1 [--------------------------|xxxxxxxx|--------]
|
||||
// 2 [xxxxxxxx|-----------------]
|
||||
// 3 [--------|xxxxxxxx]
|
||||
// 4 [xxxxxxxx]
|
||||
let vmar = VmAddressRegion::new_root();
|
||||
let base = vmar.addr();
|
||||
let vmo = VmObject::new_paged(5);
|
||||
let flags = MMUFlags::READ | MMUFlags::WRITE;
|
||||
vmar.map_at(0, vmo, 0, 0x5000, flags).unwrap();
|
||||
assert_eq!(vmar.count(), 1);
|
||||
assert_eq!(vmar.used_size(), 0x5000);
|
||||
|
||||
// 0. unmap none.
|
||||
vmar.unmap(base + 0x5000, 0x1000).unwrap();
|
||||
assert_eq!(vmar.count(), 1);
|
||||
assert_eq!(vmar.used_size(), 0x5000);
|
||||
|
||||
// // 1. unmap middle.
|
||||
// vmar.unmap(base + 0x3000, 0x1000).unwrap();
|
||||
// assert_eq!(vmar.count(), 2);
|
||||
// assert_eq!(vmar.used_size(), 0x4000);
|
||||
|
||||
// // 2. unmap prefix.
|
||||
// vmar.unmap(base, 0x1000).unwrap();
|
||||
// assert_eq!(vmar.count(), 2);
|
||||
// assert_eq!(vmar.used_size(), 0x3000);
|
||||
|
||||
// // 3. unmap postfix.
|
||||
// vmar.unmap(base + 0x2000, 0x1000).unwrap();
|
||||
// assert_eq!(vmar.count(), 2);
|
||||
// assert_eq!(vmar.used_size(), 0x2000);
|
||||
|
||||
// 4. unmap all.
|
||||
vmar.unmap(base, 0x5000).unwrap();
|
||||
assert_eq!(vmar.count(), 0);
|
||||
assert_eq!(vmar.used_size(), 0x0);
|
||||
}
|
||||
}
|
||||
@ -1,469 +0,0 @@
|
||||
use {
|
||||
self::{paged::*, physical::*, slice::*},
|
||||
super::*,
|
||||
crate::object::*,
|
||||
alloc::{
|
||||
sync::{Arc, Weak},
|
||||
vec::Vec,
|
||||
},
|
||||
bitflags::bitflags,
|
||||
core::ops::Deref,
|
||||
kernel_hal::{CachePolicy, MMUFlags},
|
||||
spin::Mutex,
|
||||
};
|
||||
|
||||
mod paged;
|
||||
mod physical;
|
||||
mod slice;
|
||||
|
||||
/// Virtual Memory Object Trait
|
||||
#[allow(clippy::len_without_is_empty)]
|
||||
pub trait VMObjectTrait: Sync + Send {
|
||||
/// Read memory to `buf` from VMO at `offset`.
|
||||
fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult;
|
||||
|
||||
/// Write memory from `buf` to VMO at `offset`.
|
||||
fn write(&self, offset: usize, buf: &[u8]) -> ZxResult;
|
||||
|
||||
/// Resets the range of bytes in the VMO from `offset` to `offset+len` to 0.
|
||||
fn zero(&self, offset: usize, len: usize) -> ZxResult;
|
||||
|
||||
/// Get the length of VMO.
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Set the length of VMO.
|
||||
fn set_len(&self, len: usize) -> ZxResult;
|
||||
|
||||
/// Commit a page.
|
||||
fn commit_page(&self, page_idx: usize, flags: MMUFlags) -> ZxResult<PhysAddr>;
|
||||
|
||||
/// Commit pages with an external function f.
|
||||
/// The VMO is internally locked before `f` is called,
|
||||
/// allowing `VmMapping` to avoid deadlock
|
||||
fn commit_pages_with(
|
||||
&self,
|
||||
f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
|
||||
) -> ZxResult;
|
||||
|
||||
/// Commit allocating physical memory.
|
||||
fn commit(&self, offset: usize, len: usize) -> ZxResult;
|
||||
|
||||
/// Decommit allocated physical memory.
|
||||
fn decommit(&self, offset: usize, len: usize) -> ZxResult;
|
||||
|
||||
/// Create a child VMO.
|
||||
fn create_child(&self, offset: usize, len: usize) -> ZxResult<Arc<dyn VMObjectTrait>>;
|
||||
|
||||
/// Append a mapping to the VMO's mapping list.
|
||||
fn append_mapping(&self, _mapping: Weak<VmMapping>) {}
|
||||
|
||||
/// Remove a mapping from the VMO's mapping list.
|
||||
fn remove_mapping(&self, _mapping: Weak<VmMapping>) {}
|
||||
|
||||
/// Complete the VmoInfo.
|
||||
fn complete_info(&self, info: &mut VmoInfo);
|
||||
|
||||
/// Get the cache policy.
|
||||
fn cache_policy(&self) -> CachePolicy;
|
||||
|
||||
/// Set the cache policy.
|
||||
fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult;
|
||||
|
||||
/// Count committed pages of the VMO.
|
||||
fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize;
|
||||
|
||||
/// Pin the given range of the VMO.
|
||||
fn pin(&self, _offset: usize, _len: usize) -> ZxResult {
|
||||
Err(ZxError::NOT_SUPPORTED)
|
||||
}
|
||||
|
||||
/// Unpin the given range of the VMO.
|
||||
fn unpin(&self, _offset: usize, _len: usize) -> ZxResult {
|
||||
Err(ZxError::NOT_SUPPORTED)
|
||||
}
|
||||
|
||||
/// Returns true if the object is backed by a contiguous range of physical memory.
|
||||
fn is_contiguous(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
/// Returns true if the object is backed by RAM.
|
||||
fn is_paged(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
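
// Sketch (not in the original source): exercising the trait above through a
// paged `VmObject` (defined below), which forwards these calls to its
// `VMObjectTrait` implementation, as the ELF loader does.
#[allow(dead_code)]
fn example_vmo_rw() -> ZxResult {
    let vmo = VmObject::new_paged(1);
    vmo.write(0, &[1, 2, 3, 4])?;
    let mut buf = [0u8; 4];
    vmo.read(0, &mut buf)?;
    assert_eq!(buf, [1, 2, 3, 4]);
    Ok(())
}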
|
||||
|
||||
/// Virtual memory containers
|
||||
///
|
||||
/// ## SYNOPSIS
|
||||
///
|
||||
/// A Virtual Memory Object (VMO) represents a contiguous region of virtual memory
|
||||
/// that may be mapped into multiple address spaces.
|
||||
pub struct VmObject {
|
||||
base: KObjectBase,
|
||||
resizable: bool,
|
||||
trait_: Arc<dyn VMObjectTrait>,
|
||||
inner: Mutex<VmObjectInner>,
|
||||
}
|
||||
|
||||
impl_kobject!(VmObject);
|
||||
|
||||
#[derive(Default)]
|
||||
struct VmObjectInner {
|
||||
parent: Weak<VmObject>,
|
||||
children: Vec<Weak<VmObject>>,
|
||||
mapping_count: usize,
|
||||
content_size: usize,
|
||||
}
|
||||
|
||||
impl VmObject {
|
||||
/// Create a new VMO backed by physical memory allocated in pages.
|
||||
pub fn new_paged(pages: usize) -> Arc<Self> {
|
||||
Self::new_paged_with_resizable(false, pages)
|
||||
}
|
||||
|
||||
/// Create a new VMO, optionally resizable, backed by physical memory allocated in pages.
|
||||
pub fn new_paged_with_resizable(resizable: bool, pages: usize) -> Arc<Self> {
|
||||
let base = KObjectBase::new();
|
||||
Arc::new(VmObject {
|
||||
resizable,
|
||||
trait_: VMObjectPaged::new(pages),
|
||||
inner: Mutex::new(VmObjectInner::default()),
|
||||
base,
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a new VMO representing a piece of contiguous physical memory.
|
||||
pub fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
|
||||
Arc::new(VmObject {
|
||||
base: KObjectBase::new(),
|
||||
resizable: false,
|
||||
trait_: VMObjectPhysical::new(paddr, pages),
|
||||
inner: Mutex::new(VmObjectInner::default()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a VM object referring to a specific contiguous range of physical frames.
|
||||
pub fn new_contiguous(pages: usize, align_log2: usize) -> ZxResult<Arc<Self>> {
|
||||
let vmo = Arc::new(VmObject {
|
||||
base: KObjectBase::new(),
|
||||
resizable: false,
|
||||
trait_: VMObjectPaged::new_contiguous(pages, align_log2)?,
|
||||
inner: Mutex::new(VmObjectInner::default()),
|
||||
});
|
||||
Ok(vmo)
|
||||
}
|
||||
|
||||
/// Create a child VMO.
|
||||
pub fn create_child(
|
||||
self: &Arc<Self>,
|
||||
resizable: bool,
|
||||
offset: usize,
|
||||
len: usize,
|
||||
) -> ZxResult<Arc<Self>> {
|
||||
let base = KObjectBase::with_name(&self.base.name());
|
||||
let trait_ = self.trait_.create_child(offset, len)?;
|
||||
let child = Arc::new(VmObject {
|
||||
base,
|
||||
resizable,
|
||||
trait_,
|
||||
inner: Mutex::new(VmObjectInner {
|
||||
parent: Arc::downgrade(self),
|
||||
..VmObjectInner::default()
|
||||
}),
|
||||
});
|
||||
self.add_child(&child);
|
||||
Ok(child)
|
||||
}
|
||||
|
||||
/// Create a child slice as a VMO
|
||||
pub fn create_slice(self: &Arc<Self>, offset: usize, p_size: usize) -> ZxResult<Arc<Self>> {
|
||||
let size = roundup_pages(p_size);
|
||||
// Why 32 * PAGE_SIZE? Taken from the Zircon source code.
|
||||
if size < p_size || size > usize::MAX & !(32 * PAGE_SIZE) {
|
||||
return Err(ZxError::OUT_OF_RANGE);
|
||||
}
|
||||
// child slice must be wholly contained
|
||||
let parent_size = self.trait_.len();
|
||||
if !page_aligned(offset) {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
if offset > parent_size || size > parent_size - offset {
|
||||
return Err(ZxError::INVALID_ARGS);
|
||||
}
|
||||
if self.resizable {
|
||||
return Err(ZxError::NOT_SUPPORTED);
|
||||
}
|
||||
if self.trait_.cache_policy() != CachePolicy::Cached && !self.trait_.is_contiguous() {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
let child = Arc::new(VmObject {
|
||||
base: KObjectBase::with_name(&self.base.name()),
|
||||
resizable: false,
|
||||
trait_: VMObjectSlice::new(self.trait_.clone(), offset, size),
|
||||
inner: Mutex::new(VmObjectInner {
|
||||
parent: Arc::downgrade(self),
|
||||
..VmObjectInner::default()
|
||||
}),
|
||||
});
|
||||
self.add_child(&child);
|
||||
Ok(child)
|
||||
}
|
||||
|
||||
/// Add child to the list and signal if ZeroChildren signal is active.
|
||||
/// If the number of children turns 0 to 1, signal it
|
||||
fn add_child(&self, child: &Arc<VmObject>) {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.children.retain(|x| x.strong_count() != 0);
|
||||
inner.children.push(Arc::downgrade(child));
|
||||
// if inner.children.len() == 1 {
|
||||
// self.base.signal_clear(Signal::VMO_ZERO_CHILDREN);
|
||||
// }
|
||||
}
|
||||
|
||||
/// Set the length of this VMO if resizable.
|
||||
pub fn set_len(&self, len: usize) -> ZxResult {
|
||||
let size = roundup_pages(len);
|
||||
if size < len {
|
||||
return Err(ZxError::OUT_OF_RANGE);
|
||||
}
|
||||
if !self.resizable {
|
||||
return Err(ZxError::UNAVAILABLE);
|
||||
}
|
||||
self.trait_.set_len(size)
|
||||
}
|
||||
|
||||
/// Set the size of the content stored in the VMO in bytes, resize vmo if needed
|
||||
pub fn set_content_size_and_resize(
|
||||
&self,
|
||||
size: usize,
|
||||
zero_until_offset: usize,
|
||||
) -> ZxResult<usize> {
|
||||
let mut inner = self.inner.lock();
|
||||
let content_size = inner.content_size;
|
||||
let len = self.trait_.len();
|
||||
if size < content_size {
|
||||
return Ok(content_size);
|
||||
}
|
||||
let required_len = roundup_pages(size);
|
||||
let new_content_size = if required_len > len && self.set_len(required_len).is_err() {
|
||||
len
|
||||
} else {
|
||||
size
|
||||
};
|
||||
let zero_until_offset = zero_until_offset.min(new_content_size);
|
||||
if zero_until_offset > content_size {
|
||||
self.trait_
|
||||
.zero(content_size, zero_until_offset - content_size)?;
|
||||
}
|
||||
inner.content_size = new_content_size;
|
||||
Ok(new_content_size)
|
||||
}
|
||||
|
||||
/// Get the size of the content stored in the VMO in bytes.
|
||||
pub fn content_size(&self) -> usize {
|
||||
let inner = self.inner.lock();
|
||||
inner.content_size
|
||||
}
|
||||
|
||||
/// Get the size of the content stored in the VMO in bytes.
|
||||
pub fn set_content_size(&self, size: usize) -> ZxResult {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.content_size = size;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get information of this VMO.
|
||||
pub fn get_info(&self) -> VmoInfo {
|
||||
let inner = self.inner.lock();
|
||||
let mut ret = VmoInfo {
|
||||
koid: self.base.id,
|
||||
name: {
|
||||
let mut arr = [0u8; 32];
|
||||
let name = self.base.name();
|
||||
let length = name.len().min(32);
|
||||
arr[..length].copy_from_slice(&name.as_bytes()[..length]);
|
||||
arr
|
||||
},
|
||||
size: self.trait_.len() as u64,
|
||||
parent_koid: inner.parent.upgrade().map(|p| p.id()).unwrap_or(0),
|
||||
num_children: inner.children.len() as u64,
|
||||
flags: if self.resizable {
|
||||
VmoInfoFlags::RESIZABLE
|
||||
} else {
|
||||
VmoInfoFlags::empty()
|
||||
},
|
||||
cache_policy: self.trait_.cache_policy() as u32,
|
||||
share_count: inner.mapping_count as u64,
|
||||
..Default::default()
|
||||
};
|
||||
self.trait_.complete_info(&mut ret);
|
||||
ret
|
||||
}
|
||||
|
||||
/// Set the cache policy.
|
||||
pub fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
|
||||
let inner = self.inner.lock();
|
||||
if !inner.children.is_empty() {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
if inner.mapping_count != 0 {
|
||||
return Err(ZxError::BAD_STATE);
|
||||
}
|
||||
self.trait_.set_cache_policy(policy)
|
||||
}
|
||||
|
||||
/// Append a mapping to the VMO's mapping list.
|
||||
pub fn append_mapping(&self, mapping: Weak<VmMapping>) {
|
||||
self.inner.lock().mapping_count += 1;
|
||||
self.trait_.append_mapping(mapping);
|
||||
}
|
||||
|
||||
/// Remove a mapping from the VMO's mapping list.
|
||||
pub fn remove_mapping(&self, mapping: Weak<VmMapping>) {
|
||||
self.inner.lock().mapping_count -= 1;
|
||||
self.trait_.remove_mapping(mapping);
|
||||
}
|
||||
|
||||
/// Returns an estimate of the number of unique VmAspaces that this object
|
||||
/// is mapped into.
|
||||
pub fn share_count(&self) -> usize {
|
||||
let inner = self.inner.lock();
|
||||
inner.mapping_count
|
||||
}
|
||||
|
||||
/// Returns true if the object size can be changed.
|
||||
pub fn is_resizable(&self) -> bool {
|
||||
self.resizable
|
||||
}
|
||||
|
||||
/// Returns true if the object is backed by a contiguous range of physical memory.
|
||||
pub fn is_contiguous(&self) -> bool {
|
||||
self.trait_.is_contiguous()
|
||||
}
|
||||
}
|
||||
|
||||
impl Deref for VmObject {
|
||||
type Target = Arc<dyn VMObjectTrait>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.trait_
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for VmObject {
|
||||
fn drop(&mut self) {
|
||||
let mut inner = self.inner.lock();
|
||||
let parent = match inner.parent.upgrade() {
|
||||
Some(parent) => parent,
|
||||
None => return,
|
||||
};
|
||||
for child in inner.children.iter() {
|
||||
if let Some(child) = child.upgrade() {
|
||||
child.inner.lock().parent = Arc::downgrade(&parent);
|
||||
}
|
||||
}
|
||||
let mut parent_inner = parent.inner.lock();
|
||||
let children = &mut parent_inner.children;
|
||||
children.append(&mut inner.children);
|
||||
children.retain(|c| c.strong_count() != 0);
|
||||
for child in children.iter() {
|
||||
let child = child.upgrade().unwrap();
|
||||
let mut inner = child.inner.lock();
|
||||
inner.children.retain(|c| c.strong_count() != 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Describes a VMO.
|
||||
#[repr(C)]
|
||||
#[derive(Default)]
|
||||
pub struct VmoInfo {
|
||||
/// The koid of this VMO.
|
||||
koid: KoID,
|
||||
/// The name of this VMO.
|
||||
name: [u8; 32],
|
||||
/// The size of this VMO; i.e., the amount of virtual address space it
|
||||
/// would consume if mapped.
|
||||
size: u64,
|
||||
/// If this VMO is a clone, the koid of its parent. Otherwise, zero.
|
||||
parent_koid: KoID,
|
||||
/// The number of clones of this VMO, if any.
|
||||
num_children: u64,
|
||||
/// The number of times this VMO is currently mapped into VMARs.
|
||||
num_mappings: u64,
|
||||
/// The number of unique address space we're mapped into.
|
||||
share_count: u64,
|
||||
/// Flags.
|
||||
pub flags: VmoInfoFlags,
|
||||
/// Padding.
|
||||
padding1: [u8; 4],
|
||||
/// If the type is `PAGED`, the amount of
|
||||
/// memory currently allocated to this VMO; i.e., the amount of physical
|
||||
/// memory it consumes. Undefined otherwise.
|
||||
committed_bytes: u64,
|
||||
/// If `flags & ZX_INFO_VMO_VIA_HANDLE`, the handle rights.
|
||||
/// Undefined otherwise.
|
||||
pub rights: Rights,
|
||||
/// VMO mapping cache policy.
|
||||
cache_policy: u32,
|
||||
}
|
||||
|
||||
bitflags! {
|
||||
#[derive(Default)]
|
||||
/// Values used by ZX_INFO_PROCESS_VMOS.
|
||||
pub struct VmoInfoFlags: u32 {
|
||||
/// The VMO points to a physical address range, and does not consume memory.
|
||||
/// Typically used to access memory-mapped hardware.
|
||||
/// Mutually exclusive with TYPE_PAGED.
|
||||
const TYPE_PHYSICAL = 0;
|
||||
|
||||
#[allow(clippy::identity_op)]
|
||||
/// The VMO is backed by RAM, consuming memory.
|
||||
/// Mutually exclusive with TYPE_PHYSICAL.
|
||||
const TYPE_PAGED = 1 << 0;
|
||||
|
||||
/// The VMO is resizable.
|
||||
const RESIZABLE = 1 << 1;
|
||||
|
||||
/// The VMO is a child, and is a copy-on-write clone.
|
||||
const IS_COW_CLONE = 1 << 2;
|
||||
|
||||
/// When reading a list of VMOs pointed to by a process, indicates that the
|
||||
/// process has a handle to the VMO, which isn't necessarily mapped.
|
||||
const VIA_HANDLE = 1 << 3;
|
||||
|
||||
/// When reading a list of VMOs pointed to by a process, indicates that the
|
||||
/// process maps the VMO into a VMAR, but doesn't necessarily have a handle to
|
||||
/// the VMO.
|
||||
const VIA_MAPPING = 1 << 4;
|
||||
|
||||
/// The VMO is a pager owned VMO created by zx_pager_create_vmo or is
|
||||
/// a clone of a VMO with this flag set. Will only be set on VMOs with
|
||||
/// the ZX_INFO_VMO_TYPE_PAGED flag set.
|
||||
const PAGER_BACKED = 1 << 5;
|
||||
|
||||
/// The VMO is contiguous.
|
||||
const CONTIGUOUS = 1 << 6;
|
||||
}
|
||||
}
|
||||
|
||||
/// Different operations that `range_change` can perform against any VmMappings that are found.
|
||||
#[allow(dead_code)]
|
||||
#[derive(PartialEq, Eq, Clone, Copy)]
|
||||
pub(super) enum RangeChangeOp {
|
||||
Unmap,
|
||||
RemoveWrite,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
pub fn read_write(vmo: &VmObject) {
|
||||
let mut buf = [0u8; 4];
|
||||
vmo.write(0, &[0, 1, 2, 3]).unwrap();
|
||||
vmo.read(0, &mut buf).unwrap();
|
||||
assert_eq!(&buf, &[0, 1, 2, 3]);
|
||||
}
|
||||
}
|
||||
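// A minimal usage sketch of the `VmObject` API above. This is illustrative only:
// it assumes the re-exports used by the unit tests in this diff (`VmObject` from
// zircon-object's vm module, `PAGE_SIZE` from kernel-hal) and is not part of the crate.
use kernel_hal::PAGE_SIZE;

fn vmo_usage_sketch() {
    // A resizable, page-backed VMO of one page.
    let vmo = VmObject::new_paged_with_resizable(true, 1);
    vmo.write(0, &[1, 2, 3, 4]).unwrap();

    // Growing is allowed only because the VMO was created resizable;
    // set_len() on a non-resizable VMO returns ZxError::UNAVAILABLE.
    vmo.set_len(2 * PAGE_SIZE).unwrap();

    // A snapshot child copies the parent's current contents.
    let child = vmo.create_child(false, 0, PAGE_SIZE).unwrap();
    let mut buf = [0u8; 4];
    child.read(0, &mut buf).unwrap();
    assert_eq!(buf, [1, 2, 3, 4]);
}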
@ -1,337 +0,0 @@
use {
    super::*,
    crate::util::block_range::BlockIter,
    alloc::sync::Arc,
    alloc::vec::Vec,
    core::ops::Range,
    kernel_hal::{MMUFlags, PhysFrame, PAGE_SIZE},
    spin::Mutex,
};

/// The main VM object type, holding a list of pages.
pub struct VMObjectPaged {
    inner: Mutex<VMObjectPagedInner>,
}

/// The mutable part of `VMObjectPaged`.
#[derive(Default)]
struct VMObjectPagedInner {
    /// Physical frames of this VMO.
    frames: Vec<PhysFrame>,
    /// Cache policy.
    cache_policy: CachePolicy,
    /// Is contiguous.
    contiguous: bool,
    /// Sum of pin_count.
    pin_count: usize,
    /// All mappings to this VMO.
    mappings: Vec<Weak<VmMapping>>,
}

impl VMObjectPaged {
    /// Create a new VMO backing on physical memory allocated in pages.
    pub fn new(pages: usize) -> Arc<Self> {
        let mut frames = Vec::new();
        frames.resize_with(pages, || PhysFrame::alloc_zeroed().unwrap());
        Arc::new(VMObjectPaged {
            inner: Mutex::new(VMObjectPagedInner {
                frames,
                ..Default::default()
            }),
        })
    }

    /// Create a list of contiguous pages.
    pub fn new_contiguous(pages: usize, align_log2: usize) -> ZxResult<Arc<Self>> {
        let frames = PhysFrame::alloc_contiguous_zeroed(pages, align_log2 - PAGE_SIZE_LOG2);
        if frames.is_empty() {
            return Err(ZxError::NO_MEMORY);
        }
        Ok(Arc::new(VMObjectPaged {
            inner: Mutex::new(VMObjectPagedInner {
                frames,
                contiguous: true,
                ..Default::default()
            }),
        }))
    }
}

impl VMObjectTrait for VMObjectPaged {
    fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
        let mut inner = self.inner.lock();
        if inner.cache_policy != CachePolicy::Cached {
            return Err(ZxError::BAD_STATE);
        }
        inner.for_each_page(offset, buf.len(), |paddr, buf_range| {
            kernel_hal::pmem_read(paddr, &mut buf[buf_range]);
        });
        Ok(())
    }

    fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
        let mut inner = self.inner.lock();
        if inner.cache_policy != CachePolicy::Cached {
            return Err(ZxError::BAD_STATE);
        }
        inner.for_each_page(offset, buf.len(), |paddr, buf_range| {
            kernel_hal::pmem_write(paddr, &buf[buf_range]);
        });
        Ok(())
    }

    fn zero(&self, offset: usize, len: usize) -> ZxResult {
        let mut inner = self.inner.lock();
        if inner.cache_policy != CachePolicy::Cached {
            return Err(ZxError::BAD_STATE);
        }
        inner.for_each_page(offset, len, |paddr, buf_range| {
            kernel_hal::pmem_zero(paddr, buf_range.len());
        });
        Ok(())
    }

    fn len(&self) -> usize {
        let inner = self.inner.lock();
        inner.frames.len() * PAGE_SIZE
    }

    fn set_len(&self, len: usize) -> ZxResult {
        assert!(page_aligned(len));
        let mut inner = self.inner.lock();
        inner.frames.resize_with(len / PAGE_SIZE, || {
            PhysFrame::alloc().ok_or(ZxError::NO_MEMORY).unwrap()
        });
        Ok(())
    }

    fn commit_page(&self, page_idx: usize, _flags: MMUFlags) -> ZxResult<PhysAddr> {
        let inner = self.inner.lock();
        Ok(inner.frames[page_idx].addr())
    }

    fn commit_pages_with(
        &self,
        f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
    ) -> ZxResult {
        let inner = self.inner.lock();
        f(&mut |page_idx, _| Ok(inner.frames[page_idx].addr()))
    }

    fn commit(&self, _offset: usize, _len: usize) -> ZxResult {
        Ok(())
    }

    fn decommit(&self, _offset: usize, _len: usize) -> ZxResult {
        Ok(())
    }

    fn create_child(&self, offset: usize, len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
        assert!(page_aligned(offset));
        assert!(page_aligned(len));
        let mut inner = self.inner.lock();
        let child = inner.create_child(offset, len)?;
        Ok(child)
    }

    fn append_mapping(&self, mapping: Weak<VmMapping>) {
        let mut inner = self.inner.lock();
        inner.mappings.push(mapping);
    }

    fn remove_mapping(&self, mapping: Weak<VmMapping>) {
        let mut inner = self.inner.lock();
        inner
            .mappings
            .drain_filter(|x| x.strong_count() == 0 || Weak::ptr_eq(x, &mapping));
    }

    fn complete_info(&self, info: &mut VmoInfo) {
        let inner = self.inner.lock();
        info.flags |= VmoInfoFlags::TYPE_PAGED;
        inner.complete_info(info);
    }

    fn cache_policy(&self) -> CachePolicy {
        let inner = self.inner.lock();
        inner.cache_policy
    }

    fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
        // conditions for allowing the cache policy to be set:
        // 1) vmo either has no pages committed currently or is transitioning from being cached
        // 2) vmo has no pinned pages
        // 3) vmo has no mappings
        // 4) vmo has no children (TODO)
        // 5) vmo is not a child
        let mut inner = self.inner.lock();
        if !inner.frames.is_empty() && inner.cache_policy != CachePolicy::Cached {
            return Err(ZxError::BAD_STATE);
        }
        if inner.pin_count != 0 {
            return Err(ZxError::BAD_STATE);
        }
        if inner.cache_policy == CachePolicy::Cached && policy != CachePolicy::Cached {
            for frame in inner.frames.iter() {
                kernel_hal::frame_flush(frame.addr());
            }
        }
        inner.cache_policy = policy;
        Ok(())
    }

    fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
        end_idx - start_idx
    }

    fn pin(&self, offset: usize, len: usize) -> ZxResult {
        let mut inner = self.inner.lock();
        if offset + len > inner.frames.len() * PAGE_SIZE {
            return Err(ZxError::OUT_OF_RANGE);
        }
        if len == 0 {
            return Ok(());
        }
        inner.pin_count += pages(len);
        Ok(())
    }

    fn unpin(&self, offset: usize, len: usize) -> ZxResult {
        let mut inner = self.inner.lock();
        if offset + len > inner.frames.len() * PAGE_SIZE {
            return Err(ZxError::OUT_OF_RANGE);
        }
        if len == 0 {
            return Ok(());
        }
        inner.pin_count -= pages(len);
        Ok(())
    }

    fn is_contiguous(&self) -> bool {
        let inner = self.inner.lock();
        inner.contiguous
    }

    fn is_paged(&self) -> bool {
        true
    }
}

impl VMObjectPagedInner {
    /// Helper function to split range into sub-ranges within pages.
    ///
    /// ```text
    /// VMO range:
    /// |----|----|----|----|----|
    ///
    /// buf:
    ///            [====len====]
    /// |--offset--|
    ///
    /// sub-ranges:
    ///            [===]
    ///                [====]
    ///                     [==]
    /// ```
    ///
    /// `f` is a function to process in-page ranges.
    /// It takes 2 arguments:
    /// * `paddr`: the start physical address of the in-page range.
    /// * `buf_range`: the range in view of the input buffer.
    fn for_each_page(
        &mut self,
        offset: usize,
        buf_len: usize,
        mut f: impl FnMut(PhysAddr, Range<usize>),
    ) {
        let iter = BlockIter {
            begin: offset,
            end: offset + buf_len,
            block_size_log2: 12,
        };
        for block in iter {
            let paddr = self.frames[block.block].addr();
            let buf_range = block.origin_begin() - offset..block.origin_end() - offset;
            f(paddr + block.begin, buf_range);
        }
    }

    /// Create a snapshot child VMO.
    fn create_child(&mut self, offset: usize, len: usize) -> ZxResult<Arc<VMObjectPaged>> {
        // cloning a contiguous vmo is no longer permitted
        // https://fuchsia.googlesource.com/fuchsia/+/e6b4c6751bbdc9ed2795e81b8211ea294f139a45
        if self.contiguous {
            return Err(ZxError::INVALID_ARGS);
        }
        if self.cache_policy != CachePolicy::Cached || self.pin_count != 0 {
            return Err(ZxError::BAD_STATE);
        }
        let mut frames = Vec::with_capacity(pages(len));
        for _ in 0..pages(len) {
            frames.push(PhysFrame::alloc().ok_or(ZxError::NO_MEMORY)?);
        }
        for (i, frame) in frames.iter().enumerate() {
            if let Some(src_frame) = self.frames.get(pages(offset) + i) {
                kernel_hal::frame_copy(src_frame.addr(), frame.addr())
            } else {
                kernel_hal::pmem_zero(frame.addr(), PAGE_SIZE);
            }
        }
        // create child VMO
        let child = Arc::new(VMObjectPaged {
            inner: Mutex::new(VMObjectPagedInner {
                frames,
                ..Default::default()
            }),
        });
        Ok(child)
    }

    fn complete_info(&self, info: &mut VmoInfo) {
        if self.contiguous {
            info.flags |= VmoInfoFlags::CONTIGUOUS;
        }
        // info.num_children = if self.type_.is_hidden() { 2 } else { 0 };
        info.committed_bytes = (self.frames.len() * PAGE_SIZE) as u64;
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn read_write() {
        let vmo = VmObject::new_paged(2);
        super::super::tests::read_write(&*vmo);
    }

    #[test]
    fn create_child() {
        let vmo = VmObject::new_paged(1);
        let child_vmo = vmo.create_child(false, 0, PAGE_SIZE).unwrap();

        // write to parent and make sure clone doesn't see it
        vmo.test_write(0, 1);
        assert_eq!(vmo.test_read(0), 1);
        assert_eq!(child_vmo.test_read(0), 0);

        // write to clone and make sure parent doesn't see it
        child_vmo.test_write(0, 2);
        assert_eq!(vmo.test_read(0), 1);
        assert_eq!(child_vmo.test_read(0), 2);
    }

    impl VmObject {
        pub fn test_write(&self, page: usize, value: u8) {
            self.write(page * PAGE_SIZE, &[value]).unwrap();
        }

        pub fn test_read(&self, page: usize) -> u8 {
            let mut buf = [0; 1];
            self.read(page * PAGE_SIZE, &mut buf).unwrap();
            buf[0]
        }
    }
}
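// The `for_each_page` helper above is what turns a byte range into page-sized chunks
// for read/write/zero. The arithmetic can be illustrated without `BlockIter`; the
// standalone sketch below is a re-derivation for illustration only (the name
// `split_per_page` and the closure signature are not part of the crate).
fn split_per_page(offset: usize, len: usize, mut f: impl FnMut(usize, usize, core::ops::Range<usize>)) {
    const PAGE_SIZE: usize = 0x1000;
    let mut cur = offset;
    let end = offset + len;
    while cur < end {
        let page = cur / PAGE_SIZE; // which frame to touch
        let in_page = cur % PAGE_SIZE; // offset within that frame
        let chunk = (PAGE_SIZE - in_page).min(end - cur); // stop at page or buffer end
        f(page, in_page, cur - offset..cur - offset + chunk);
        cur += chunk;
    }
}

fn split_demo() {
    // A 0x1900-byte buffer starting at offset 0x800 touches pages 0, 1 and 2:
    // (page 0, +0x800, buf[0..0x800]), (page 1, +0, buf[0x800..0x1800]), (page 2, +0, buf[0x1800..0x1900]).
    split_per_page(0x800, 0x1900, |page, in_page, buf_range| {
        let _ = (page, in_page, buf_range);
    });
}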
@ -1,129 +0,0 @@
use {super::*, alloc::sync::Arc, kernel_hal::MMUFlags, spin::Mutex};

/// VMO representing a physical range of memory.
pub struct VMObjectPhysical {
    paddr: PhysAddr,
    pages: usize,
    /// Lock this when accessing physical memory.
    data_lock: Mutex<()>,
    inner: Mutex<VMObjectPhysicalInner>,
}

struct VMObjectPhysicalInner {
    cache_policy: CachePolicy,
}

impl VMObjectPhysicalInner {
    pub fn new() -> VMObjectPhysicalInner {
        VMObjectPhysicalInner {
            cache_policy: CachePolicy::Uncached,
        }
    }
}

impl VMObjectPhysical {
    /// Create a new VMO representing a piece of contiguous physical memory.
    /// You must ensure nobody else owns this piece of memory yet.
    pub fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
        assert!(page_aligned(paddr));
        Arc::new(VMObjectPhysical {
            paddr,
            pages,
            data_lock: Mutex::default(),
            inner: Mutex::new(VMObjectPhysicalInner::new()),
        })
    }
}

impl VMObjectTrait for VMObjectPhysical {
    fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
        let _ = self.data_lock.lock();
        assert!(offset + buf.len() <= self.len());
        kernel_hal::pmem_read(self.paddr + offset, buf);
        Ok(())
    }

    fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
        let _ = self.data_lock.lock();
        assert!(offset + buf.len() <= self.len());
        kernel_hal::pmem_write(self.paddr + offset, buf);
        Ok(())
    }

    fn zero(&self, offset: usize, len: usize) -> ZxResult {
        let _ = self.data_lock.lock();
        assert!(offset + len <= self.len());
        kernel_hal::pmem_zero(self.paddr + offset, len);
        Ok(())
    }

    fn len(&self) -> usize {
        self.pages * PAGE_SIZE
    }

    fn set_len(&self, _len: usize) -> ZxResult {
        unimplemented!()
    }

    fn commit_page(&self, page_idx: usize, _flags: MMUFlags) -> ZxResult<PhysAddr> {
        Ok(self.paddr + page_idx * PAGE_SIZE)
    }

    fn commit_pages_with(
        &self,
        f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
    ) -> ZxResult {
        f(&mut |page_idx, _flags| Ok(self.paddr + page_idx * PAGE_SIZE))
    }

    fn commit(&self, _offset: usize, _len: usize) -> ZxResult {
        // do nothing
        Ok(())
    }

    fn decommit(&self, _offset: usize, _len: usize) -> ZxResult {
        // do nothing
        Ok(())
    }

    fn create_child(&self, _offset: usize, _len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
        Err(ZxError::NOT_SUPPORTED)
    }

    fn complete_info(&self, _info: &mut VmoInfo) {
        warn!("VmoInfo for physical is unimplemented");
    }

    fn cache_policy(&self) -> CachePolicy {
        let inner = self.inner.lock();
        inner.cache_policy
    }

    fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
        let mut inner = self.inner.lock();
        inner.cache_policy = policy;
        Ok(())
    }

    fn committed_pages_in_range(&self, _start_idx: usize, _end_idx: usize) -> usize {
        0
    }

    fn is_contiguous(&self) -> bool {
        true
    }
}

#[cfg(test)]
mod tests {
    #![allow(unsafe_code)]
    use super::*;
    use kernel_hal::CachePolicy;

    #[test]
    fn read_write() {
        let vmo = VmObject::new_physical(0x1000, 2);
        assert_eq!(vmo.cache_policy(), CachePolicy::Uncached);
        super::super::tests::read_write(&vmo);
    }
}
@ -1,112 +0,0 @@
use {super::*, kernel_hal::MMUFlags};

pub struct VMObjectSlice {
    /// Parent node.
    parent: Arc<dyn VMObjectTrait>,
    /// The offset from parent.
    offset: usize,
    /// The size in bytes.
    size: usize,
}

impl VMObjectSlice {
    pub fn new(parent: Arc<dyn VMObjectTrait>, offset: usize, size: usize) -> Arc<Self> {
        Arc::new(VMObjectSlice {
            parent,
            offset,
            size,
        })
    }

    fn check_range(&self, offset: usize, len: usize) -> ZxResult {
        if offset + len >= self.size {
            return Err(ZxError::OUT_OF_RANGE);
        }
        Ok(())
    }
}

impl VMObjectTrait for VMObjectSlice {
    fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
        self.check_range(offset, buf.len())?;
        self.parent.read(offset + self.offset, buf)
    }

    fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
        self.check_range(offset, buf.len())?;
        self.parent.write(offset + self.offset, buf)
    }

    fn zero(&self, offset: usize, len: usize) -> ZxResult {
        self.check_range(offset, len)?;
        self.parent.zero(offset + self.offset, len)
    }

    fn len(&self) -> usize {
        self.size
    }

    fn set_len(&self, _len: usize) -> ZxResult {
        unimplemented!()
    }

    fn commit_page(&self, page_idx: usize, flags: MMUFlags) -> ZxResult<usize> {
        self.parent
            .commit_page(page_idx + self.offset / PAGE_SIZE, flags)
    }

    fn commit_pages_with(
        &self,
        f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
    ) -> ZxResult {
        self.parent.commit_pages_with(f)
    }

    fn commit(&self, offset: usize, len: usize) -> ZxResult {
        self.parent.commit(offset + self.offset, len)
    }

    fn decommit(&self, offset: usize, len: usize) -> ZxResult {
        self.parent.decommit(offset + self.offset, len)
    }

    fn create_child(&self, _offset: usize, _len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
        Err(ZxError::NOT_SUPPORTED)
    }

    fn complete_info(&self, info: &mut VmoInfo) {
        self.parent.complete_info(info);
    }

    fn cache_policy(&self) -> CachePolicy {
        self.parent.cache_policy()
    }

    fn set_cache_policy(&self, _policy: CachePolicy) -> ZxResult {
        Ok(())
    }

    fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
        let po = pages(self.offset);
        self.parent
            .committed_pages_in_range(start_idx + po, end_idx + po)
    }

    fn pin(&self, offset: usize, len: usize) -> ZxResult {
        self.check_range(offset, len)?;
        self.parent.pin(offset + self.offset, len)
    }

    fn unpin(&self, offset: usize, len: usize) -> ZxResult {
        self.check_range(offset, len)?;
        self.parent.unpin(offset + self.offset, len)
    }

    fn is_contiguous(&self) -> bool {
        self.parent.is_contiguous()
    }

    fn is_paged(&self) -> bool {
        self.parent.is_paged()
    }
}
@ -1,17 +0,0 @@
[package]
name = "zircon-syscall"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Zircon syscalls implementation"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
log = "0.4"
spin = "0.7"
bitflags = "1.2"
numeric-enum-macro = "0.2"
zircon-object = { path = "../zircon-object" }
kernel-hal = { path = "../kernel-hal" }
futures = { version = "0.3", default-features = false, features = ["alloc", "async-await"] }
@ -1,31 +0,0 @@
use std::io::Write;

fn main() {
    println!("cargo:rerun-if-changed=src/zx-syscall-numbers.h");

    let mut fout = std::fs::File::create("src/consts.rs").unwrap();
    writeln!(fout, "// Generated by build.rs. DO NOT EDIT.").unwrap();
    writeln!(fout, "use numeric_enum_macro::numeric_enum;\n").unwrap();
    writeln!(fout, "numeric_enum! {{").unwrap();
    writeln!(fout, "#[repr(u32)]").unwrap();
    writeln!(fout, "#[derive(Debug, Eq, PartialEq)]").unwrap();
    writeln!(fout, "#[allow(non_camel_case_types)]").unwrap();
    writeln!(fout, "#[allow(clippy::upper_case_acronyms)]").unwrap();
    writeln!(fout, "pub enum SyscallType {{").unwrap();

    let data = std::fs::read_to_string("src/zx-syscall-numbers.h").unwrap();
    for line in data.lines() {
        if !line.starts_with("#define") {
            continue;
        }
        let mut iter = line.split(' ');
        let _ = iter.next().unwrap();
        let name = iter.next().unwrap();
        let id = iter.next().unwrap();

        let name = &name[7..].to_uppercase();
        writeln!(fout, "    {} = {},", name, id).unwrap();
    }
    writeln!(fout, "}}").unwrap();
    writeln!(fout, "}}").unwrap();
}
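// The transformation this build.rs applies is purely textual: every
// `#define ZX_SYS_<name> <id>` line in zx-syscall-numbers.h becomes one enum variant.
// A hedged, standalone sketch of the same per-line step (the `parse_line` helper is
// illustrative and not part of the crate):
fn parse_line(line: &str) -> Option<String> {
    if !line.starts_with("#define") {
        return None;
    }
    let mut iter = line.split(' ');
    let _define = iter.next()?;
    let name = iter.next()?; // e.g. "ZX_SYS_channel_create"
    let id = iter.next()?; // e.g. "3"
    // Strip the "ZX_SYS_" prefix (7 chars) and upper-case the rest.
    Some(format!("    {} = {},", &name[7..].to_uppercase(), id))
}

fn parse_line_demo() {
    assert_eq!(
        parse_line("#define ZX_SYS_channel_create 3").as_deref(),
        Some("    CHANNEL_CREATE = 3,")
    );
}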
@ -1,155 +0,0 @@
use {
    super::*,
    zircon_object::ipc::{Channel, MessagePacket},
};

impl Syscall<'_> {
    #[allow(clippy::too_many_arguments)]
    /// Read/Receive a message from a channel.
    pub fn sys_channel_read(
        &self,
        handle_value: HandleValue,
        options: u32,
        mut bytes: UserOutPtr<u8>,
        handles: usize,
        num_bytes: u32,
        num_handles: u32,
        mut actual_bytes: UserOutPtr<u32>,
        mut actual_handles: UserOutPtr<u32>,
    ) -> ZxResult {
        info!(
            "channel.read: handle={:#x?}, options={:?}, bytes=({:#x?}; {:#x?}), handles=({:#x?}; {:#x?})",
            handle_value, options, bytes, num_bytes, handles, num_handles,
        );
        let proc = self.thread.proc();
        let channel = proc.get_object_with_rights::<Channel>(handle_value, Rights::READ)?;
        // FIXME:
        const MAY_DISCARD: u32 = 1;
        let never_discard = options & MAY_DISCARD == 0;

        let msg = if never_discard {
            channel.check_and_read(|front_msg| {
                if num_bytes < front_msg.data.len() as u32
                    || num_handles < front_msg.handles.len() as u32
                {
                    let bytes = front_msg.data.len();
                    actual_bytes.write_if_not_null(bytes as u32)?;
                    actual_handles.write_if_not_null(front_msg.handles.len() as u32)?;
                    Err(ZxError::BUFFER_TOO_SMALL)
                } else {
                    Ok(())
                }
            })?
        } else {
            channel.read()?
        };

        // Enable this if you need to pass core-tests:
        // hack_core_tests(handle_value, &self.thread.proc().name(), &mut msg.data);

        actual_bytes.write_if_not_null(msg.data.len() as u32)?;
        actual_handles.write_if_not_null(msg.handles.len() as u32)?;
        if num_bytes < msg.data.len() as u32 || num_handles < msg.handles.len() as u32 {
            return Err(ZxError::BUFFER_TOO_SMALL);
        }
        bytes.write_array(msg.data.as_slice())?;
        let values = proc.add_handles(msg.handles);
        UserOutPtr::<HandleValue>::from(handles).write_array(&values)?;
        Ok(())
    }

    /// Write a message to a channel.
    pub fn sys_channel_write(
        &self,
        handle_value: HandleValue,
        options: u32,
        user_bytes: UserInPtr<u8>,
        num_bytes: u32,
        user_handles: UserInPtr<HandleValue>,
        num_handles: u32,
    ) -> ZxResult {
        info!(
            "channel.write: handle_value={:#x}, num_bytes={:#x}, num_handles={:#x}",
            handle_value, num_bytes, num_handles,
        );
        if options != 0 {
            return Err(ZxError::INVALID_ARGS);
        }
        if num_bytes > 65536 {
            return Err(ZxError::OUT_OF_RANGE);
        }
        let proc = self.thread.proc();
        let data = user_bytes.read_array(num_bytes as usize)?;
        let handles = user_handles.read_array(num_handles as usize)?;
        let transfer_self = handles.iter().any(|&handle| handle == handle_value);
        let handles = proc.remove_handles(&handles)?;
        if transfer_self {
            return Err(ZxError::NOT_SUPPORTED);
        }
        if handles.len() > 64 {
            return Err(ZxError::OUT_OF_RANGE);
        }
        for handle in handles.iter() {
            if !handle.rights.contains(Rights::TRANSFER) {
                return Err(ZxError::ACCESS_DENIED);
            }
        }
        let channel = proc.get_object_with_rights::<Channel>(handle_value, Rights::WRITE)?;
        channel.write(MessagePacket { data, handles })?;
        Ok(())
    }

    /// Create a new channel.
    pub fn sys_channel_create(
        &self,
        options: u32,
        mut out0: UserOutPtr<HandleValue>,
        mut out1: UserOutPtr<HandleValue>,
    ) -> ZxResult {
        info!("channel.create: options={:#x}", options);
        if options != 0u32 {
            return Err(ZxError::INVALID_ARGS);
        }
        let proc = self.thread.proc();
        let (end0, end1) = Channel::create();
        let handle0 = proc.add_handle(Handle::new(end0, Rights::DEFAULT_CHANNEL));
        let handle1 = proc.add_handle(Handle::new(end1, Rights::DEFAULT_CHANNEL));
        out0.write(handle0)?;
        out1.write(handle1)?;
        Ok(())
    }
}

// HACK: pass arguments to standalone-test
// #[allow(clippy::naive_bytecount)]
// fn hack_core_tests(handle: HandleValue, thread_name: &str, data: &mut Vec<u8>) {
//     if handle == 3 && thread_name == "userboot" {
//         let cmdline = core::str::from_utf8(data).unwrap();
//         for kv in cmdline.split('\0') {
//             if let Some(v) = kv.strip_prefix("core-tests=") {
//                 *TESTS_ARGS.lock() = format!("test\0-f\0{}\0", v.replace(',', ":"));
//             }
//         }
//     } else if handle == 3 && thread_name == "test/core-standalone-test" {
//         let test_args = &*TESTS_ARGS.lock();
//         let len = data.len();
//         data.extend(test_args.bytes());
//         #[repr(C)]
//         #[derive(Debug)]
//         struct ProcArgs {
//             protocol: u32,
//             version: u32,
//             handle_info_off: u32,
//             args_off: u32,
//             args_num: u32,
//             environ_off: u32,
//             environ_num: u32,
//         }
//         #[allow(unsafe_code)]
//         #[allow(clippy::cast_ptr_alignment)]
//         let header = unsafe { &mut *(data.as_mut_ptr() as *mut ProcArgs) };
//         header.args_off = len as u32;
//         header.args_num = test_args.as_bytes().iter().filter(|&&b| b == 0).count() as u32;
//         warn!("HACKED: test args = {:?}", test_args);
//     }
// }
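// The syscalls above are thin wrappers over `zircon_object::ipc::Channel`. A minimal
// kernel-side sketch of the object API they rely on, for illustration only (assumes the
// zircon-object crate from this diff; error handling is collapsed into unwrap):
use zircon_object::ipc::{Channel, MessagePacket};
use alloc::vec::Vec;

fn channel_object_sketch() {
    // Channel::create() returns the two ends of a new channel.
    let (end0, end1) = Channel::create();
    // write() queues a MessagePacket on the peer; read() dequeues it.
    end0.write(MessagePacket {
        data: Vec::from(&b"ping"[..]),
        handles: Vec::new(),
    })
    .unwrap();
    let msg = end1.read().unwrap();
    assert_eq!(msg.data, b"ping");
}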
@ -1,181 +0,0 @@
// Generated by build.rs. DO NOT EDIT.
use numeric_enum_macro::numeric_enum;

numeric_enum! {
#[repr(u32)]
#[derive(Debug, Eq, PartialEq)]
#[allow(non_camel_case_types)]
#[allow(clippy::upper_case_acronyms)]
pub enum SyscallType {
    BTI_CREATE = 0,
    BTI_PIN = 1,
    BTI_RELEASE_QUARANTINE = 2,
    CHANNEL_CREATE = 3,
    CHANNEL_READ = 4,
    CHANNEL_READ_ETC = 5,
    CHANNEL_WRITE = 6,
    CHANNEL_WRITE_ETC = 7,
    CHANNEL_CALL_NORETRY = 8,
    CHANNEL_CALL_FINISH = 9,
    CLOCK_GET = 10,
    CLOCK_ADJUST = 11,
    CLOCK_GET_MONOTONIC_VIA_KERNEL = 12,
    CLOCK_CREATE = 13,
    CLOCK_READ = 14,
    CLOCK_GET_DETAILS = 15,
    CLOCK_UPDATE = 16,
    CPRNG_DRAW_ONCE = 17,
    CPRNG_ADD_ENTROPY = 18,
    DEBUG_READ = 19,
    DEBUG_WRITE = 20,
    DEBUG_SEND_COMMAND = 21,
    DEBUGLOG_CREATE = 22,
    DEBUGLOG_WRITE = 23,
    DEBUGLOG_READ = 24,
    EVENT_CREATE = 25,
    EVENTPAIR_CREATE = 26,
    EXCEPTION_GET_THREAD = 27,
    EXCEPTION_GET_PROCESS = 28,
    FIFO_CREATE = 29,
    FIFO_READ = 30,
    FIFO_WRITE = 31,
    FRAMEBUFFER_GET_INFO = 32,
    FRAMEBUFFER_SET_RANGE = 33,
    FUTEX_WAIT = 34,
    FUTEX_WAKE = 35,
    FUTEX_REQUEUE = 36,
    FUTEX_WAKE_SINGLE_OWNER = 37,
    FUTEX_REQUEUE_SINGLE_OWNER = 38,
    FUTEX_GET_OWNER = 39,
    GUEST_CREATE = 40,
    GUEST_SET_TRAP = 41,
    HANDLE_CLOSE = 42,
    HANDLE_CLOSE_MANY = 43,
    HANDLE_DUPLICATE = 44,
    HANDLE_REPLACE = 45,
    INTERRUPT_CREATE = 46,
    INTERRUPT_BIND = 47,
    INTERRUPT_WAIT = 48,
    INTERRUPT_DESTROY = 49,
    INTERRUPT_ACK = 50,
    INTERRUPT_TRIGGER = 51,
    INTERRUPT_BIND_VCPU = 52,
    IOMMU_CREATE = 53,
    IOPORTS_REQUEST = 54,
    IOPORTS_RELEASE = 55,
    JOB_CREATE = 56,
    JOB_SET_POLICY = 57,
    JOB_SET_CRITICAL = 58,
    KTRACE_READ = 59,
    KTRACE_CONTROL = 60,
    KTRACE_WRITE = 61,
    NANOSLEEP = 62,
    TICKS_GET_VIA_KERNEL = 63,
    MSI_ALLOCATE = 64,
    MSI_CREATE = 65,
    MTRACE_CONTROL = 66,
    OBJECT_WAIT_ONE = 67,
    OBJECT_WAIT_MANY = 68,
    OBJECT_WAIT_ASYNC = 69,
    OBJECT_SIGNAL = 70,
    OBJECT_SIGNAL_PEER = 71,
    OBJECT_GET_PROPERTY = 72,
    OBJECT_SET_PROPERTY = 73,
    OBJECT_GET_INFO = 74,
    OBJECT_GET_CHILD = 75,
    OBJECT_SET_PROFILE = 76,
    PAGER_CREATE = 77,
    PAGER_CREATE_VMO = 78,
    PAGER_DETACH_VMO = 79,
    PAGER_SUPPLY_PAGES = 80,
    PAGER_OP_RANGE = 81,
    PC_FIRMWARE_TABLES = 82,
    PCI_GET_NTH_DEVICE = 83,
    PCI_ENABLE_BUS_MASTER = 84,
    PCI_RESET_DEVICE = 85,
    PCI_CONFIG_READ = 86,
    PCI_CONFIG_WRITE = 87,
    PCI_CFG_PIO_RW = 88,
    PCI_GET_BAR = 89,
    PCI_MAP_INTERRUPT = 90,
    PCI_QUERY_IRQ_MODE = 91,
    PCI_SET_IRQ_MODE = 92,
    PCI_INIT = 93,
    PCI_ADD_SUBTRACT_IO_RANGE = 94,
    PMT_UNPIN = 95,
    PORT_CREATE = 96,
    PORT_QUEUE = 97,
    PORT_WAIT = 98,
    PORT_CANCEL = 99,
    PROCESS_EXIT = 100,
    PROCESS_CREATE = 101,
    PROCESS_START = 102,
    PROCESS_READ_MEMORY = 103,
    PROCESS_WRITE_MEMORY = 104,
    PROFILE_CREATE = 105,
    RESOURCE_CREATE = 106,
    SMC_CALL = 107,
    SOCKET_CREATE = 108,
    SOCKET_WRITE = 109,
    SOCKET_READ = 110,
    SOCKET_SHUTDOWN = 111,
    STREAM_CREATE = 112,
    STREAM_WRITEV = 113,
    STREAM_WRITEV_AT = 114,
    STREAM_READV = 115,
    STREAM_READV_AT = 116,
    STREAM_SEEK = 117,
    SYSCALL_TEST_0 = 118,
    SYSCALL_TEST_1 = 119,
    SYSCALL_TEST_2 = 120,
    SYSCALL_TEST_3 = 121,
    SYSCALL_TEST_4 = 122,
    SYSCALL_TEST_5 = 123,
    SYSCALL_TEST_6 = 124,
    SYSCALL_TEST_7 = 125,
    SYSCALL_TEST_8 = 126,
    SYSCALL_TEST_WRAPPER = 127,
    SYSCALL_TEST_HANDLE_CREATE = 128,
    SYSTEM_GET_EVENT = 129,
    SYSTEM_MEXEC = 130,
    SYSTEM_MEXEC_PAYLOAD_GET = 131,
    SYSTEM_POWERCTL = 132,
    TASK_SUSPEND = 133,
    TASK_SUSPEND_TOKEN = 134,
    TASK_CREATE_EXCEPTION_CHANNEL = 135,
    TASK_KILL = 136,
    THREAD_EXIT = 137,
    THREAD_CREATE = 138,
    THREAD_START = 139,
    THREAD_READ_STATE = 140,
    THREAD_WRITE_STATE = 141,
    TIMER_CREATE = 142,
    TIMER_SET = 143,
    TIMER_CANCEL = 144,
    VCPU_CREATE = 145,
    VCPU_RESUME = 146,
    VCPU_INTERRUPT = 147,
    VCPU_READ_STATE = 148,
    VCPU_WRITE_STATE = 149,
    VMAR_ALLOCATE = 150,
    VMAR_DESTROY = 151,
    VMAR_MAP = 152,
    VMAR_UNMAP = 153,
    VMAR_PROTECT = 154,
    VMAR_OP_RANGE = 155,
    VMO_CREATE = 156,
    VMO_READ = 157,
    VMO_WRITE = 158,
    VMO_GET_SIZE = 159,
    VMO_SET_SIZE = 160,
    VMO_OP_RANGE = 161,
    VMO_CREATE_CHILD = 162,
    VMO_SET_CACHE_POLICY = 163,
    VMO_REPLACE_AS_EXECUTABLE = 164,
    VMO_CREATE_CONTIGUOUS = 165,
    VMO_CREATE_PHYSICAL = 166,
    COUNT = 167,
    FUTEX_WAKE_HANDLE_CLOSE_THREAD_EXIT = 200,
    VMAR_UNMAP_HANDLE_CLOSE_THREAD_EXIT = 201,
}
}
@ -1,92 +0,0 @@
use {
    super::*,
    zircon_object::{debuglog::*, dev::*},
};

impl Syscall<'_> {
    /// Create a kernel managed debuglog reader or writer.
    pub fn sys_debuglog_create(
        &self,
        rsrc: HandleValue,
        options: u32,
        mut target: UserOutPtr<HandleValue>,
    ) -> ZxResult {
        info!(
            "debuglog.create: resource_handle={:#x?}, options={:#x?}",
            rsrc, options,
        );
        let proc = self.thread.proc();
        if rsrc != 0 {
            proc.get_object::<Resource>(rsrc)?
                .validate(ResourceKind::ROOT)?;
        }
        let dlog = DebugLog::create(options);
        // FIXME:
        const FLAG_READABLE: u32 = 0x4000_0000u32;
        let dlog_right = if options & FLAG_READABLE == 0 {
            Rights::DEFAULT_DEBUGLOG
        } else {
            Rights::DEFAULT_DEBUGLOG | Rights::READ
        };
        let dlog_handle = proc.add_handle(Handle::new(dlog, dlog_right));
        target.write(dlog_handle)?;
        Ok(())
    }

    /// Write a log entry to the debuglog.
    pub fn sys_debuglog_write(
        &self,
        handle_value: HandleValue,
        options: u32,
        buf: UserInPtr<u8>,
        len: usize,
    ) -> ZxResult {
        info!(
            "debuglog.write: handle={:#x?}, options={:#x?}, buf=({:#x?}; {:#x?})",
            handle_value, options, buf, len,
        );
        const LOG_FLAGS_MASK: u32 = 0x10;
        if options & !LOG_FLAGS_MASK != 0 {
            return Err(ZxError::INVALID_ARGS);
        }
        let datalen = len.min(224);
        let data = buf.read_string(datalen as usize)?;
        let proc = self.thread.proc();
        let dlog = proc.get_object_with_rights::<DebugLog>(handle_value, Rights::WRITE)?;
        dlog.write(Severity::Info, options, self.thread.id(), proc.id(), &data);
        // print to kernel console
        kernel_hal::serial_write(&data);
        if data.as_bytes().last() != Some(&b'\n') {
            kernel_hal::serial_write("\n");
        }
        Ok(())
    }

    #[allow(unsafe_code)]
    /// Read log entries from the debuglog.
    pub fn sys_debuglog_read(
        &self,
        handle_value: HandleValue,
        options: u32,
        mut buf: UserOutPtr<u8>,
        len: usize,
    ) -> ZxResult {
        info!(
            "debuglog.read: handle={:#x?}, options={:#x?}, buf=({:#x?}; {:#x?})",
            handle_value, options, buf, len,
        );
        if options != 0 {
            return Err(ZxError::INVALID_ARGS);
        }
        let proc = self.thread.proc();
        let mut buffer = [0; DLOG_MAX_LEN];
        let dlog = proc.get_object_with_rights::<DebugLog>(handle_value, Rights::READ)?;
        let actual_len = dlog.read(&mut buffer).min(len);
        if actual_len == 0 {
            return Err(ZxError::SHOULD_WAIT);
        }
        buf.write_array(&buffer[..actual_len])?;
        // special case: return actual_len as status
        Err(unsafe { core::mem::transmute(actual_len as u32) })
    }
}
@ -1,74 +0,0 @@
//! Zircon syscall implementations

#![no_std]
#![deny(warnings, unsafe_code, unused_must_use, unreachable_patterns)]

extern crate alloc;

#[macro_use]
extern crate log;

use {
    core::convert::TryFrom,
    kernel_hal::user::*,
    zircon_object::object::*,
    zircon_object::task::{CurrentThread, ThreadFn},
};

mod channel;
mod consts;
mod debuglog;

use consts::SyscallType as Sys;

pub struct Syscall<'a> {
    pub thread: &'a CurrentThread,
    pub thread_fn: ThreadFn,
}

impl Syscall<'_> {
    pub async fn syscall(&mut self, num: u32, args: [usize; 8]) -> isize {
        let thread_name = self.thread.name();
        let proc_name = self.thread.proc().name();
        let sys_type = match Sys::try_from(num) {
            Ok(t) => t,
            Err(_) => {
                error!("invalid syscall number: {}", num);
                return ZxError::INVALID_ARGS as _;
            }
        };
        info!(
            "{}|{} {:?} => args={:x?}",
            proc_name, thread_name, sys_type, args
        );
        let [a0, a1, a2, a3, a4, a5, a6, a7] = args;
        let ret = match sys_type {
            Sys::CHANNEL_CREATE => self.sys_channel_create(a0 as _, a1.into(), a2.into()),
            Sys::CHANNEL_READ => self.sys_channel_read(
                a0 as _,
                a1 as _,
                a2.into(),
                a3 as _,
                a4 as _,
                a5 as _,
                a6.into(),
                a7.into(),
            ),
            Sys::CHANNEL_WRITE => {
                self.sys_channel_write(a0 as _, a1 as _, a2.into(), a3 as _, a4.into(), a5 as _)
            }
            Sys::DEBUGLOG_CREATE => self.sys_debuglog_create(a0 as _, a1 as _, a2.into()),
            Sys::DEBUGLOG_WRITE => self.sys_debuglog_write(a0 as _, a1 as _, a2.into(), a3 as _),
            Sys::DEBUGLOG_READ => self.sys_debuglog_read(a0 as _, a1 as _, a2.into(), a3 as _),
            _ => {
                error!("syscall unimplemented: {:?}", sys_type);
                Err(ZxError::NOT_SUPPORTED)
            }
        };
        info!("{}|{} {:?} <= {:?}", proc_name, thread_name, sys_type, ret);
        match ret {
            Ok(_) => 0,
            Err(err) => err as isize,
        }
    }
}
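// The dispatcher above relies on `numeric_enum!` deriving `TryFrom<u32>` for SyscallType,
// so an unknown number falls through to the INVALID_ARGS branch. A small, hedged
// illustration of that mapping, using a tiny excerpt of the generated enum shown earlier
// in this diff (DemoSyscallType is illustrative, not part of the crate):
use core::convert::TryFrom;
use numeric_enum_macro::numeric_enum;

numeric_enum! {
    #[repr(u32)]
    #[derive(Debug, Eq, PartialEq)]
    #[allow(non_camel_case_types)]
    pub enum DemoSyscallType {
        CHANNEL_CREATE = 3,
        CHANNEL_READ = 4,
        CHANNEL_WRITE = 6,
    }
}

fn dispatch_demo() {
    // Known number -> enum variant, as in `Sys::try_from(num)` inside `syscall`.
    assert_eq!(DemoSyscallType::try_from(3), Ok(DemoSyscallType::CHANNEL_CREATE));
    // Unknown number -> Err, which the dispatcher maps to ZxError::INVALID_ARGS.
    assert!(DemoSyscallType::try_from(999).is_err());
}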
@ -1,177 +0,0 @@
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// WARNING: THIS FILE IS MACHINE GENERATED BY //tools/kazoo. DO NOT EDIT.

#define ZX_SYS_bti_create 0
#define ZX_SYS_bti_pin 1
#define ZX_SYS_bti_release_quarantine 2
#define ZX_SYS_channel_create 3
#define ZX_SYS_channel_read 4
#define ZX_SYS_channel_read_etc 5
#define ZX_SYS_channel_write 6
#define ZX_SYS_channel_write_etc 7
#define ZX_SYS_channel_call_noretry 8
#define ZX_SYS_channel_call_finish 9
#define ZX_SYS_clock_get 10
#define ZX_SYS_clock_adjust 11
#define ZX_SYS_clock_get_monotonic_via_kernel 12
#define ZX_SYS_clock_create 13
#define ZX_SYS_clock_read 14
#define ZX_SYS_clock_get_details 15
#define ZX_SYS_clock_update 16
#define ZX_SYS_cprng_draw_once 17
#define ZX_SYS_cprng_add_entropy 18
#define ZX_SYS_debug_read 19
#define ZX_SYS_debug_write 20
#define ZX_SYS_debug_send_command 21
#define ZX_SYS_debuglog_create 22
#define ZX_SYS_debuglog_write 23
#define ZX_SYS_debuglog_read 24
#define ZX_SYS_event_create 25
#define ZX_SYS_eventpair_create 26
#define ZX_SYS_exception_get_thread 27
#define ZX_SYS_exception_get_process 28
#define ZX_SYS_fifo_create 29
#define ZX_SYS_fifo_read 30
#define ZX_SYS_fifo_write 31
#define ZX_SYS_framebuffer_get_info 32
#define ZX_SYS_framebuffer_set_range 33
#define ZX_SYS_futex_wait 34
#define ZX_SYS_futex_wake 35
#define ZX_SYS_futex_requeue 36
#define ZX_SYS_futex_wake_single_owner 37
#define ZX_SYS_futex_requeue_single_owner 38
#define ZX_SYS_futex_get_owner 39
#define ZX_SYS_guest_create 40
#define ZX_SYS_guest_set_trap 41
#define ZX_SYS_handle_close 42
#define ZX_SYS_handle_close_many 43
#define ZX_SYS_handle_duplicate 44
#define ZX_SYS_handle_replace 45
#define ZX_SYS_interrupt_create 46
#define ZX_SYS_interrupt_bind 47
#define ZX_SYS_interrupt_wait 48
#define ZX_SYS_interrupt_destroy 49
#define ZX_SYS_interrupt_ack 50
#define ZX_SYS_interrupt_trigger 51
#define ZX_SYS_interrupt_bind_vcpu 52
#define ZX_SYS_iommu_create 53
#define ZX_SYS_ioports_request 54
#define ZX_SYS_ioports_release 55
#define ZX_SYS_job_create 56
#define ZX_SYS_job_set_policy 57
#define ZX_SYS_job_set_critical 58
#define ZX_SYS_ktrace_read 59
#define ZX_SYS_ktrace_control 60
#define ZX_SYS_ktrace_write 61
#define ZX_SYS_nanosleep 62
#define ZX_SYS_ticks_get_via_kernel 63
#define ZX_SYS_msi_allocate 64
#define ZX_SYS_msi_create 65
#define ZX_SYS_mtrace_control 66
#define ZX_SYS_object_wait_one 67
#define ZX_SYS_object_wait_many 68
#define ZX_SYS_object_wait_async 69
#define ZX_SYS_object_signal 70
#define ZX_SYS_object_signal_peer 71
#define ZX_SYS_object_get_property 72
#define ZX_SYS_object_set_property 73
#define ZX_SYS_object_get_info 74
#define ZX_SYS_object_get_child 75
#define ZX_SYS_object_set_profile 76
#define ZX_SYS_pager_create 77
#define ZX_SYS_pager_create_vmo 78
#define ZX_SYS_pager_detach_vmo 79
#define ZX_SYS_pager_supply_pages 80
#define ZX_SYS_pager_op_range 81
#define ZX_SYS_pc_firmware_tables 82
#define ZX_SYS_pci_get_nth_device 83
#define ZX_SYS_pci_enable_bus_master 84
#define ZX_SYS_pci_reset_device 85
#define ZX_SYS_pci_config_read 86
#define ZX_SYS_pci_config_write 87
#define ZX_SYS_pci_cfg_pio_rw 88
#define ZX_SYS_pci_get_bar 89
#define ZX_SYS_pci_map_interrupt 90
#define ZX_SYS_pci_query_irq_mode 91
#define ZX_SYS_pci_set_irq_mode 92
#define ZX_SYS_pci_init 93
#define ZX_SYS_pci_add_subtract_io_range 94
#define ZX_SYS_pmt_unpin 95
#define ZX_SYS_port_create 96
#define ZX_SYS_port_queue 97
#define ZX_SYS_port_wait 98
#define ZX_SYS_port_cancel 99
#define ZX_SYS_process_exit 100
#define ZX_SYS_process_create 101
#define ZX_SYS_process_start 102
#define ZX_SYS_process_read_memory 103
#define ZX_SYS_process_write_memory 104
#define ZX_SYS_profile_create 105
#define ZX_SYS_resource_create 106
#define ZX_SYS_smc_call 107
#define ZX_SYS_socket_create 108
#define ZX_SYS_socket_write 109
#define ZX_SYS_socket_read 110
#define ZX_SYS_socket_shutdown 111
#define ZX_SYS_stream_create 112
#define ZX_SYS_stream_writev 113
#define ZX_SYS_stream_writev_at 114
#define ZX_SYS_stream_readv 115
#define ZX_SYS_stream_readv_at 116
#define ZX_SYS_stream_seek 117
#define ZX_SYS_syscall_test_0 118
#define ZX_SYS_syscall_test_1 119
#define ZX_SYS_syscall_test_2 120
#define ZX_SYS_syscall_test_3 121
#define ZX_SYS_syscall_test_4 122
#define ZX_SYS_syscall_test_5 123
#define ZX_SYS_syscall_test_6 124
#define ZX_SYS_syscall_test_7 125
#define ZX_SYS_syscall_test_8 126
#define ZX_SYS_syscall_test_wrapper 127
#define ZX_SYS_syscall_test_handle_create 128
#define ZX_SYS_system_get_event 129
#define ZX_SYS_system_mexec 130
#define ZX_SYS_system_mexec_payload_get 131
#define ZX_SYS_system_powerctl 132
#define ZX_SYS_task_suspend 133
#define ZX_SYS_task_suspend_token 134
#define ZX_SYS_task_create_exception_channel 135
#define ZX_SYS_task_kill 136
#define ZX_SYS_thread_exit 137
#define ZX_SYS_thread_create 138
#define ZX_SYS_thread_start 139
#define ZX_SYS_thread_read_state 140
#define ZX_SYS_thread_write_state 141
#define ZX_SYS_timer_create 142
#define ZX_SYS_timer_set 143
#define ZX_SYS_timer_cancel 144
#define ZX_SYS_vcpu_create 145
#define ZX_SYS_vcpu_resume 146
#define ZX_SYS_vcpu_interrupt 147
#define ZX_SYS_vcpu_read_state 148
#define ZX_SYS_vcpu_write_state 149
#define ZX_SYS_vmar_allocate 150
#define ZX_SYS_vmar_destroy 151
#define ZX_SYS_vmar_map 152
#define ZX_SYS_vmar_unmap 153
#define ZX_SYS_vmar_protect 154
#define ZX_SYS_vmar_op_range 155
#define ZX_SYS_vmo_create 156
#define ZX_SYS_vmo_read 157
#define ZX_SYS_vmo_write 158
#define ZX_SYS_vmo_get_size 159
#define ZX_SYS_vmo_set_size 160
#define ZX_SYS_vmo_op_range 161
#define ZX_SYS_vmo_create_child 162
#define ZX_SYS_vmo_set_cache_policy 163
#define ZX_SYS_vmo_replace_as_executable 164
#define ZX_SYS_vmo_create_contiguous 165
#define ZX_SYS_vmo_create_physical 166
#define ZX_SYS_COUNT 167

#define ZX_SYS_futex_wake_handle_close_thread_exit 200
#define ZX_SYS_vmar_unmap_handle_close_thread_exit 201
@ -1,2 +1 @@
book
.DS_Store
@ -1,3 +1 @@
# User Programs

zCore follows a microkernel design. One tricky problem in any microkernel design is how to bootstrap the initial user-space process. This is usually done by having the kernel implement a minimal version of file-system reading and program loading just for that bootstrap.