Merge branch 'master' of github.com:DeathWish5/zCore-Tutorial

Author: Yu Chen
commit 44e9ab946e

@@ -0,0 +1,6 @@
[workspace]
members = [
"object",
"kernel-hal-unix",
"kernel-hal",
]

@@ -0,0 +1,18 @@
[package]
name = "kernel-hal-unix"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Kernel HAL implementation on Linux and macOS."
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
log = "0.4"
libc = "0.2"
tempfile = "3"
bitflags = "1.2"
lazy_static = "1.4"
kernel-hal = { path = "../kernel-hal" }
async-std = "1.9"
trapframe = "0.8.0"

@@ -0,0 +1,302 @@
#![feature(asm)]
#![feature(linkage)]
#![deny(warnings)]
extern crate alloc;
#[macro_use]
extern crate log;
use {
alloc::boxed::Box,
alloc::collections::VecDeque,
core::time::Duration,
core::{future::Future, pin::Pin},
lazy_static::*,
std::fmt::{Debug, Formatter},
std::fs::{File, OpenOptions},
std::io::Error,
std::os::unix::io::AsRawFd,
std::sync::Mutex,
std::time::SystemTime,
tempfile::tempdir,
};
pub use kernel_hal::{defs::*, *};
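// This crate provides the strong implementations of the HAL interface: each
// `#[export_name = "hal_..."]` below matches a `#[linkage = "weak"]` stub of
// the same name in the `kernel-hal` crate, so at link time these definitions
// replace the stubs' `unimplemented!()` bodies.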
#[repr(C)]
pub struct Thread {
thread: usize,
}
impl Thread {
#[export_name = "hal_thread_spawn"]
pub fn spawn(
future: Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
_vmtoken: usize,
) -> Self {
async_std::task::spawn(future);
Thread { thread: 0 }
}
}
/// Get current time.
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap()
}
/// Initialize the HAL.
///
/// This function must be called at the beginning.
pub fn init() {
#[cfg(target_os = "macos")]
unimplemented!()
}
#[repr(C)]
pub struct PhysFrame {
paddr: PhysAddr,
}
impl Debug for PhysFrame {
fn fmt(&self, f: &mut Formatter<'_>) -> core::result::Result<(), std::fmt::Error> {
write!(f, "PhysFrame({:#x})", self.paddr)
}
}
const PMEM_SIZE: usize = 0x4000_0000; // 1GiB
const PAGE_SIZE: usize = 0x1000;
fn page_aligned(x: VirtAddr) -> bool {
x % PAGE_SIZE == 0
}
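// Design note: "physical" memory is emulated by a temporary file. The whole
// file is mmapped once at PMEM_BASE (see `phys_to_virt` below), so a physical
// address is just an offset into the file, and mapping a frame at another
// virtual address is just another mmap of the same file offset.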
lazy_static! {
static ref FRAME_FILE: File = create_pmem_file();
}
fn create_pmem_file() -> File {
let dir = tempdir().expect("failed to create pmem dir");
let path = dir.path().join("pmem");
// workaround on macOS to avoid permission denied.
// see https://jiege.ch/software/2020/02/07/macos-mmap-exec/ for analysis on this problem.
#[cfg(target_os = "macos")]
std::mem::forget(dir);
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.expect("failed to create pmem file");
file.set_len(PMEM_SIZE as u64)
.expect("failed to resize file");
trace!("create pmem file: path={:?}, size={:#x}", path, PMEM_SIZE);
let prot = libc::PROT_READ | libc::PROT_WRITE;
mmap(file.as_raw_fd(), 0, PMEM_SIZE, phys_to_virt(0), prot);
file
}
/// Mmap frame file `fd` to `vaddr`.
fn mmap(fd: libc::c_int, offset: usize, len: usize, vaddr: VirtAddr, prot: libc::c_int) {
// workaround on macOS to write text section.
#[cfg(target_os = "macos")]
let prot = if prot & libc::PROT_EXEC != 0 {
prot | libc::PROT_WRITE
} else {
prot
};
let ret = unsafe {
let flags = libc::MAP_SHARED | libc::MAP_FIXED;
libc::mmap(vaddr as _, len, prot, flags, fd, offset as _)
} as usize;
println!(
"mmap file: fd={}, offset={:#x}, len={:#x}, vaddr={:#x}, prot={:#b}",
fd, offset, len, vaddr, prot,
);
assert_eq!(ret, vaddr, "failed to mmap: {:?}", Error::last_os_error());
}
lazy_static! {
static ref AVAILABLE_FRAMES: Mutex<VecDeque<usize>> =
Mutex::new((PAGE_SIZE..PMEM_SIZE).step_by(PAGE_SIZE).collect());
}
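// Note: the allocatable range starts at PAGE_SIZE rather than 0, reserving
// frame 0 as the permanently-zeroed frame returned by `zero_frame_addr()`.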
impl PhysFrame {
#[export_name = "hal_frame_alloc"]
pub fn alloc() -> Option<Self> {
let ret = AVAILABLE_FRAMES
.lock()
.unwrap()
.pop_front()
.map(|paddr| PhysFrame { paddr });
trace!("frame alloc: {:?}", ret);
ret
}
#[export_name = "hal_zero_frame_paddr"]
pub fn zero_frame_addr() -> PhysAddr {
0
}
}
impl Drop for PhysFrame {
#[export_name = "hal_frame_dealloc"]
fn drop(&mut self) {
trace!("frame dealloc: {:?}", self);
AVAILABLE_FRAMES.lock().unwrap().push_back(self.paddr);
}
}
fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
/// Map physical memory from here.
const PMEM_BASE: VirtAddr = 0x8_0000_0000;
PMEM_BASE + paddr
}
/// Ensure physical memory is mmapped and accessible.
fn ensure_mmap_pmem() {
FRAME_FILE.as_raw_fd();
}
/// Read physical memory from `paddr` to `buf`.
#[export_name = "hal_pmem_read"]
pub fn pmem_read(paddr: PhysAddr, buf: &mut [u8]) {
trace!("pmem read: paddr={:#x}, len={:#x}", paddr, buf.len());
assert!(paddr + buf.len() <= PMEM_SIZE);
ensure_mmap_pmem();
unsafe {
(phys_to_virt(paddr) as *const u8).copy_to_nonoverlapping(buf.as_mut_ptr(), buf.len());
}
}
/// Write physical memory to `paddr` from `buf`.
#[export_name = "hal_pmem_write"]
pub fn pmem_write(paddr: PhysAddr, buf: &[u8]) {
trace!("pmem write: paddr={:#x}, len={:#x}", paddr, buf.len());
assert!(paddr + buf.len() <= PMEM_SIZE);
ensure_mmap_pmem();
unsafe {
buf.as_ptr()
.copy_to_nonoverlapping(phys_to_virt(paddr) as _, buf.len());
}
}
/// Zero physical memory at `[paddr, paddr + len)`
#[export_name = "hal_pmem_zero"]
pub fn pmem_zero(paddr: PhysAddr, len: usize) {
trace!("pmem_zero: addr={:#x}, len={:#x}", paddr, len);
assert!(paddr + len <= PMEM_SIZE);
ensure_mmap_pmem();
unsafe {
core::ptr::write_bytes(phys_to_virt(paddr) as *mut u8, 0, len);
}
}
/// Copy content of `src` frame to `target` frame
#[export_name = "hal_frame_copy"]
pub fn frame_copy(src: PhysAddr, target: PhysAddr) {
trace!("frame_copy: {:#x} <- {:#x}", target, src);
assert!(src + PAGE_SIZE <= PMEM_SIZE && target + PAGE_SIZE <= PMEM_SIZE);
ensure_mmap_pmem();
unsafe {
let buf = phys_to_virt(src) as *const u8;
buf.copy_to_nonoverlapping(phys_to_virt(target) as _, PAGE_SIZE);
}
}
/// Flush the physical frame.
#[export_name = "hal_frame_flush"]
pub fn frame_flush(_target: PhysAddr) {
// do nothing
}
/// Page Table
#[repr(C)]
pub struct PageTable {
table_phys: PhysAddr,
}
impl PageTable {
/// Create a new `PageTable`.
#[allow(clippy::new_without_default)]
#[export_name = "hal_pt_new"]
pub fn new() -> Self {
PageTable { table_phys: 0 }
}
}
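// On this Unix HAL a "page table" is the host process's own address space:
// `map`/`unmap`/`protect` below delegate to mmap/munmap/mprotect on the
// frame file, and `table_phys` is a dummy value.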
impl PageTableTrait for PageTable {
/// Map the page of `vaddr` to the frame of `paddr` with `flags`.
#[export_name = "hal_pt_map"]
fn map(&mut self, vaddr: VirtAddr, paddr: PhysAddr, flags: MMUFlags) -> Result<()> {
debug_assert!(page_aligned(vaddr));
debug_assert!(page_aligned(paddr));
let prot = flags.to_mmap_prot();
mmap(FRAME_FILE.as_raw_fd(), paddr, PAGE_SIZE, vaddr, prot);
Ok(())
}
/// Unmap the page of `vaddr`.
#[export_name = "hal_pt_unmap"]
fn unmap(&mut self, vaddr: VirtAddr) -> Result<()> {
self.unmap_cont(vaddr, 1)
}
/// Change the `flags` of the page of `vaddr`.
#[export_name = "hal_pt_protect"]
fn protect(&mut self, vaddr: VirtAddr, flags: MMUFlags) -> Result<()> {
debug_assert!(page_aligned(vaddr));
let prot = flags.to_mmap_prot();
let ret = unsafe { libc::mprotect(vaddr as _, PAGE_SIZE, prot) };
assert_eq!(ret, 0, "failed to mprotect: {:?}", Error::last_os_error());
Ok(())
}
/// Query the physical address which the page of `vaddr` maps to.
#[export_name = "hal_pt_query"]
fn query(&mut self, vaddr: VirtAddr) -> Result<PhysAddr> {
debug_assert!(page_aligned(vaddr));
unimplemented!()
}
/// Get the physical address of root page table.
#[export_name = "hal_pt_table_phys"]
fn table_phys(&self) -> PhysAddr {
self.table_phys
}
#[export_name = "hal_pt_unmap_cont"]
fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
if pages == 0 {
return Ok(());
}
debug_assert!(page_aligned(vaddr));
let ret = unsafe { libc::munmap(vaddr as _, PAGE_SIZE * pages) };
assert_eq!(ret, 0, "failed to munmap: {:?}", Error::last_os_error());
Ok(())
}
}
trait FlagsExt {
fn to_mmap_prot(&self) -> libc::c_int;
}
impl FlagsExt for MMUFlags {
fn to_mmap_prot(&self) -> libc::c_int {
let mut flags = 0;
if self.contains(MMUFlags::READ) {
flags |= libc::PROT_READ;
}
if self.contains(MMUFlags::WRITE) {
flags |= libc::PROT_WRITE;
}
if self.contains(MMUFlags::EXECUTE) {
flags |= libc::PROT_EXEC;
}
flags
}
}

@@ -0,0 +1,13 @@
[package]
name = "kernel-hal"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
description = "Kernel HAL interface definations."
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitflags = "1.2"
trapframe = "0.8.0"
numeric-enum-macro = "0.2"

@@ -0,0 +1,311 @@
#![no_std]
#![feature(linkage)]
#![deny(warnings)]
extern crate alloc;
pub use trapframe::{GeneralRegs, UserContext};
use {
alloc::{boxed::Box, vec::Vec},
core::{future::Future, pin::Pin, time::Duration},
};
#[derive(Debug)]
pub struct HalError;
/// The result type returned by HAL functions.
pub type Result<T> = core::result::Result<T, HalError>;
pub mod defs {
use bitflags::bitflags;
use numeric_enum_macro::numeric_enum;
bitflags! {
pub struct MMUFlags: usize {
#[allow(clippy::identity_op)]
const CACHE_1 = 1 << 0;
const CACHE_2 = 1 << 1;
const READ = 1 << 2;
const WRITE = 1 << 3;
const EXECUTE = 1 << 4;
const USER = 1 << 5;
const RXW = Self::READ.bits | Self::WRITE.bits | Self::EXECUTE.bits;
}
}
numeric_enum! {
#[repr(u32)]
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum CachePolicy {
Cached = 0,
Uncached = 1,
UncachedDevice = 2,
WriteCombining = 3,
}
}
pub const CACHE_POLICY_MASK: u32 = 3;
impl Default for CachePolicy {
fn default() -> Self {
Self::Cached
}
}
pub type PhysAddr = usize;
pub type VirtAddr = usize;
pub type DevVAddr = usize;
pub const PAGE_SIZE: usize = 0x1000;
}
pub use self::defs::*;
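// The functions and methods below are weak stubs. A HAL implementation crate
// (e.g. `kernel-hal-unix`) defines strong symbols under the same
// `export_name`s, and the linker substitutes them for these
// `unimplemented!()` bodies.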
#[repr(C)]
pub struct Thread {
id: usize,
}
impl Thread {
/// Spawn a new thread.
#[linkage = "weak"]
#[export_name = "hal_thread_spawn"]
pub fn spawn(
_future: Pin<Box<dyn Future<Output = ()> + Send + 'static>>,
_vmtoken: usize,
) -> Self {
unimplemented!()
}
}
#[linkage = "weak"]
#[export_name = "hal_timer_now"]
pub fn timer_now() -> Duration {
unimplemented!()
}
#[repr(C)]
pub struct PhysFrame {
paddr: PhysAddr,
}
impl PhysFrame {
#[linkage = "weak"]
#[export_name = "hal_frame_alloc"]
pub fn alloc() -> Option<Self> {
unimplemented!()
}
#[linkage = "weak"]
#[export_name = "hal_frame_alloc_contiguous"]
pub fn alloc_contiguous_base(_size: usize, _align_log2: usize) -> Option<PhysAddr> {
unimplemented!()
}
pub fn alloc_contiguous(size: usize, align_log2: usize) -> Vec<Self> {
PhysFrame::alloc_contiguous_base(size, align_log2).map_or(Vec::new(), |base| {
(0..size)
.map(|i| PhysFrame {
paddr: base + i * PAGE_SIZE,
})
.collect()
})
}
pub fn alloc_zeroed() -> Option<Self> {
Self::alloc().map(|f| {
pmem_zero(f.addr(), PAGE_SIZE);
f
})
}
pub fn alloc_contiguous_zeroed(size: usize, align_log2: usize) -> Vec<Self> {
PhysFrame::alloc_contiguous_base(size, align_log2).map_or(Vec::new(), |base| {
pmem_zero(base, size * PAGE_SIZE);
(0..size)
.map(|i| PhysFrame {
paddr: base + i * PAGE_SIZE,
})
.collect()
})
}
pub fn addr(&self) -> PhysAddr {
self.paddr
}
#[linkage = "weak"]
#[export_name = "hal_zero_frame_paddr"]
pub fn zero_frame_addr() -> PhysAddr {
unimplemented!()
}
}
impl Drop for PhysFrame {
#[linkage = "weak"]
#[export_name = "hal_frame_dealloc"]
fn drop(&mut self) {
unimplemented!()
}
}
/// Read physical memory from `paddr` to `buf`.
#[linkage = "weak"]
#[export_name = "hal_pmem_read"]
pub fn pmem_read(_paddr: PhysAddr, _buf: &mut [u8]) {
unimplemented!()
}
/// Write physical memory to `paddr` from `buf`.
#[linkage = "weak"]
#[export_name = "hal_pmem_write"]
pub fn pmem_write(_paddr: PhysAddr, _buf: &[u8]) {
unimplemented!()
}
/// Zero physical memory at `[paddr, paddr + len)`
#[linkage = "weak"]
#[export_name = "hal_pmem_zero"]
pub fn pmem_zero(_paddr: PhysAddr, _len: usize) {
unimplemented!()
}
/// Copy content of `src` frame to `target` frame.
#[linkage = "weak"]
#[export_name = "hal_frame_copy"]
pub fn frame_copy(_src: PhysAddr, _target: PhysAddr) {
unimplemented!()
}
/// Flush the physical frame.
#[linkage = "weak"]
#[export_name = "hal_frame_flush"]
pub fn frame_flush(_target: PhysAddr) {
unimplemented!()
}
pub trait PageTableTrait: Sync + Send {
/// Map the page of `vaddr` to the frame of `paddr` with `flags`.
fn map(&mut self, _vaddr: VirtAddr, _paddr: PhysAddr, _flags: MMUFlags) -> Result<()>;
/// Unmap the page of `vaddr`.
fn unmap(&mut self, _vaddr: VirtAddr) -> Result<()>;
/// Change the `flags` of the page of `vaddr`.
fn protect(&mut self, _vaddr: VirtAddr, _flags: MMUFlags) -> Result<()>;
/// Query the physical address which the page of `vaddr` maps to.
fn query(&mut self, _vaddr: VirtAddr) -> Result<PhysAddr>;
/// Get the physical address of root page table.
fn table_phys(&self) -> PhysAddr;
#[cfg(target_arch = "riscv64")]
/// Activate this page table
fn activate(&self);
fn map_many(
&mut self,
mut vaddr: VirtAddr,
paddrs: &[PhysAddr],
flags: MMUFlags,
) -> Result<()> {
for &paddr in paddrs {
self.map(vaddr, paddr, flags)?;
vaddr += PAGE_SIZE;
}
Ok(())
}
fn map_cont(
&mut self,
mut vaddr: VirtAddr,
paddr: PhysAddr,
pages: usize,
flags: MMUFlags,
) -> Result<()> {
for i in 0..pages {
let paddr = paddr + i * PAGE_SIZE;
self.map(vaddr, paddr, flags)?;
vaddr += PAGE_SIZE;
}
Ok(())
}
fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
for i in 0..pages {
self.unmap(vaddr + i * PAGE_SIZE)?;
}
Ok(())
}
}
/// Page Table
#[repr(C)]
pub struct PageTable {
table_phys: PhysAddr,
}
impl PageTable {
/// Get current page table
#[linkage = "weak"]
#[export_name = "hal_pt_current"]
pub fn current() -> Self {
unimplemented!()
}
/// Create a new `PageTable`.
#[allow(clippy::new_without_default)]
#[linkage = "weak"]
#[export_name = "hal_pt_new"]
pub fn new() -> Self {
unimplemented!()
}
}
impl PageTableTrait for PageTable {
/// Map the page of `vaddr` to the frame of `paddr` with `flags`.
#[linkage = "weak"]
#[export_name = "hal_pt_map"]
fn map(&mut self, _vaddr: VirtAddr, _paddr: PhysAddr, _flags: MMUFlags) -> Result<()> {
unimplemented!()
}
/// Unmap the page of `vaddr`.
#[linkage = "weak"]
#[export_name = "hal_pt_unmap"]
fn unmap(&mut self, _vaddr: VirtAddr) -> Result<()> {
unimplemented!()
}
/// Change the `flags` of the page of `vaddr`.
#[linkage = "weak"]
#[export_name = "hal_pt_protect"]
fn protect(&mut self, _vaddr: VirtAddr, _flags: MMUFlags) -> Result<()> {
unimplemented!()
}
/// Query the physical address which the page of `vaddr` maps to.
#[linkage = "weak"]
#[export_name = "hal_pt_query"]
fn query(&mut self, _vaddr: VirtAddr) -> Result<PhysAddr> {
unimplemented!()
}
/// Get the physical address of root page table.
#[linkage = "weak"]
#[export_name = "hal_pt_table_phys"]
fn table_phys(&self) -> PhysAddr {
self.table_phys
}
/// Activate this page table
#[cfg(target_arch = "riscv64")]
#[linkage = "weak"]
#[export_name = "hal_pt_activate"]
fn activate(&self) {
unimplemented!()
}
#[linkage = "weak"]
#[export_name = "hal_pt_unmap_cont"]
fn unmap_cont(&mut self, vaddr: VirtAddr, pages: usize) -> Result<()> {
for i in 0..pages {
self.unmap(vaddr + i * PAGE_SIZE)?;
}
Ok(())
}
}

@@ -0,0 +1,20 @@
[package]
name = "ch02-03"
version = "0.1.0"
authors = ["Runji Wang <wangrunji0408@163.com>"]
edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
log = "0.4"
spin = "0.7"
downcast-rs = { version = "1.2.0", default-features = false }
bitflags = "1.2"
hashbrown = "0.9"
trapframe = "0.8.0"
futures = { version = "0.3", default-features = false, features = ["alloc", "async-await"] }
async-std = { version = "1.9", features = ["attributes", "unstable"] }
numeric-enum-macro = "0.2"
kernel-hal = { path = "../kernel-hal" }
kernel-hal-unix = { path = "../kernel-hal-unix" }

@@ -0,0 +1,232 @@
// ANCHOR: result
/// The result type returned by kernel object methods, with `ZxError` as the error type.
pub type ZxResult<T = ()> = Result<T, ZxError>;
// ANCHOR_END: result
// ANCHOR: error_begin
/// Zircon statuses are signed 32 bit integers. The space of values is
/// divided as follows:
/// - The zero value is for the OK status.
/// - Negative values are defined by the system, in this file.
/// - Positive values are reserved for protocol-specific error values,
/// and will never be defined by the system.
#[allow(non_camel_case_types, dead_code)]
#[repr(i32)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ZxError {
OK = 0,
// ======= Internal failures =======
/// The system encountered an otherwise unspecified error
/// while performing the operation.
INTERNAL = -1,
/// The operation is not implemented, supported,
/// or enabled.
NOT_SUPPORTED = -2,
// ANCHOR_END: error_begin
/// The system was not able to allocate some resource
/// needed for the operation.
NO_RESOURCES = -3,
/// The system was not able to allocate memory needed
/// for the operation.
NO_MEMORY = -4,
// -5 used to be ZX_ERR_CALL_FAILED.
/// The system call was interrupted, but should be
/// retried. This should not be seen outside of the VDSO.
INTERNAL_INTR_RETRY = -6,
// ======= Parameter errors =======
/// An argument is invalid, e.g. a null pointer.
INVALID_ARGS = -10,
/// A specified handle value does not refer to a handle.
BAD_HANDLE = -11,
/// The subject of the operation is the wrong type to
/// perform the operation.
/// Example: Attempting a message_read on a thread handle.
WRONG_TYPE = -12,
/// The specified syscall number is invalid.
BAD_SYSCALL = -13,
/// An argument is outside the valid range for this
/// operation.
OUT_OF_RANGE = -14,
/// A caller provided buffer is too small for
/// this operation.
BUFFER_TOO_SMALL = -15,
// ======= Precondition or state errors =======
/// The operation failed because the current state of the
/// object does not allow it, or a precondition of the operation is
/// not satisfied.
BAD_STATE = -20,
/// The time limit for the operation elapsed before
/// the operation completed.
TIMED_OUT = -21,
/// The operation cannot be performed currently but
/// potentially could succeed if the caller waits for a prerequisite
/// to be satisfied, for example waiting for a handle to be readable
/// or writable.
/// Example: Attempting to read from a channel that has no
/// messages waiting but has an open remote will return ZX_ERR_SHOULD_WAIT.
/// Attempting to read from a channel that has no messages waiting
/// and has a closed remote end will return ZX_ERR_PEER_CLOSED.
SHOULD_WAIT = -22,
/// The in-progress operation (e.g. a wait) has been
/// canceled.
CANCELED = -23,
/// The operation failed because the remote end of the
/// subject of the operation was closed.
PEER_CLOSED = -24,
/// The requested entity is not found.
NOT_FOUND = -25,
/// An object with the specified identifier
/// already exists.
/// Example: Attempting to create a file when a file already exists
/// with that name.
ALREADY_EXISTS = -26,
/// The operation failed because the named entity
/// is already owned or controlled by another entity. The operation
/// could succeed later if the current owner releases the entity.
ALREADY_BOUND = -27,
/// The subject of the operation is currently unable
/// to perform the operation.
/// Note: This is used when there's no direct way for the caller to
/// observe when the subject will be able to perform the operation
/// and should thus retry.
UNAVAILABLE = -28,
// ======= Permission check errors =======
/// The caller did not have permission to perform
/// the specified operation.
ACCESS_DENIED = -30,
// ======= Input-output errors =======
/// Otherwise unspecified error occurred during I/O.
IO = -40,
/// The entity the I/O operation is being performed on
/// rejected the operation.
/// Example: an I2C device NAK'ing a transaction or a disk controller
/// rejecting an invalid command, or a stalled USB endpoint.
IO_REFUSED = -41,
/// The data in the operation failed an integrity
/// check and is possibly corrupted.
/// Example: CRC or Parity error.
IO_DATA_INTEGRITY = -42,
/// The data in the operation is currently unavailable
/// and may be permanently lost.
/// Example: A disk block is irrecoverably damaged.
IO_DATA_LOSS = -43,
/// The device is no longer available (has been
/// unplugged from the system, powered down, or the driver has been
/// unloaded).
IO_NOT_PRESENT = -44,
/// More data was received from the device than expected.
/// Example: a USB "babble" error due to a device sending more data than
/// the host queued to receive.
IO_OVERRUN = -45,
/// An operation did not complete within the required timeframe.
/// Example: A USB isochronous transfer that failed to complete due to an overrun or underrun.
IO_MISSED_DEADLINE = -46,
/// The data in the operation is an invalid parameter or is out of range.
/// Example: A USB transfer that failed to complete with TRB Error
IO_INVALID = -47,
// ======== Filesystem Errors ========
/// Path name is too long.
BAD_PATH = -50,
/// Object is not a directory or does not support
/// directory operations.
/// Example: Attempted to open a file as a directory or
/// attempted to do directory operations on a file.
NOT_DIR = -51,
/// Object is not a regular file.
NOT_FILE = -52,
/// This operation would cause a file to exceed a
/// filesystem-specific size limit.
FILE_BIG = -53,
/// Filesystem or device space is exhausted.
NO_SPACE = -54,
/// Directory is not empty.
NOT_EMPTY = -55,
// ======== Flow Control ========
// These are not errors, as such, and will never be returned
// by a syscall or public API. They exist to allow callbacks
// to request changes in operation.
/// Do not call again.
/// Example: A notification callback will be called on every
/// event until it returns something other than ZX_OK.
/// This status allows differentiation between "stop due to
/// an error" and "stop because the work is done."
STOP = -60,
/// Advance to the next item.
/// Example: A notification callback will use this response
/// to indicate it did not "consume" an item passed to it,
/// but by choice, not due to an error condition.
NEXT = -61,
/// Ownership of the item has moved to an asynchronous worker.
///
/// Unlike ZX_ERR_STOP, which implies that iteration on an object
/// should stop, and ZX_ERR_NEXT, which implies that iteration
/// should continue to the next item, ZX_ERR_ASYNC implies
/// that an asynchronous worker is responsible for continuing iteration.
///
/// Example: A notification callback will be called on every
/// event, but one event needs to handle some work asynchronously
/// before it can continue. ZX_ERR_ASYNC implies the worker is
/// responsible for resuming iteration once its work has completed.
ASYNC = -62,
// ======== Network-related errors ========
/// Specified protocol is not
/// supported.
PROTOCOL_NOT_SUPPORTED = -70,
/// Host is unreachable.
ADDRESS_UNREACHABLE = -71,
/// Address is being used by someone else.
ADDRESS_IN_USE = -72,
/// Socket is not connected.
NOT_CONNECTED = -73,
/// Remote peer rejected the connection.
CONNECTION_REFUSED = -74,
/// Connection was reset.
CONNECTION_RESET = -75,
// ANCHOR: error_end
/// Connection was aborted.
CONNECTION_ABORTED = -76,
}
// ANCHOR_END: error_end

@@ -0,0 +1,176 @@
use {
super::*,
crate::error::*,
crate::object::*,
alloc::collections::VecDeque,
alloc::sync::{Arc, Weak},
alloc::vec::Vec,
core::sync::atomic::{AtomicU32, Ordering},
spin::Mutex,
};
pub struct Channel {
base: KObjectBase,
peer: Weak<Channel>,
recv_queue: Mutex<VecDeque<T>>,
next_txid: AtomicU32,
}
type T = MessagePacket;
type TxID = u32;
impl_kobject!(Channel
fn peer(&self) -> ZxResult<Arc<dyn KernelObject>> {
let peer = self.peer.upgrade().ok_or(ZxError::PEER_CLOSED)?;
Ok(peer)
}
fn related_koid(&self) -> KoID {
self.peer.upgrade().map(|p| p.id()).unwrap_or(0)
}
);
impl Channel {
/// Create a channel and return a pair of its endpoints
#[allow(unsafe_code)]
pub fn create() -> (Arc<Self>, Arc<Self>) {
let mut channel0 = Arc::new(Channel {
base: KObjectBase::default(),
peer: Weak::default(),
recv_queue: Default::default(),
next_txid: AtomicU32::new(0x8000_0000),
});
let channel1 = Arc::new(Channel {
base: KObjectBase::default(),
peer: Arc::downgrade(&channel0),
recv_queue: Default::default(),
next_txid: AtomicU32::new(0x8000_0000),
});
// no other reference of `channel0`
unsafe {
Arc::get_mut_unchecked(&mut channel0).peer = Arc::downgrade(&channel1);
}
(channel0, channel1)
}
/// Read a packet from the channel.
///
/// Returns `SHOULD_WAIT` if the queue is empty, or `PEER_CLOSED`
/// if it is empty and the peer endpoint has been dropped.
pub fn read(&self) -> ZxResult<T> {
let mut recv_queue = self.recv_queue.lock();
if let Some(msg) = recv_queue.pop_front() {
return Ok(msg);
}
if self.peer_closed() {
Err(ZxError::PEER_CLOSED)
} else {
Err(ZxError::SHOULD_WAIT)
}
}
/// Write a packet to the channel
pub fn write(&self, msg: T) -> ZxResult {
let peer = self.peer.upgrade().ok_or(ZxError::PEER_CLOSED)?;
peer.push_general(msg);
Ok(())
}
/// Push a message to general queue, called from peer.
fn push_general(&self, msg: T) {
let mut recv_queue = self.recv_queue.lock();
recv_queue.push_back(msg);
}
/// Generate a new transaction ID for `call`.
fn new_txid(&self) -> TxID {
self.next_txid.fetch_add(1, Ordering::SeqCst)
}
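// Note: starting txids at 0x8000_0000 mirrors Zircon, where kernel-generated
// txids for `zx_channel_call` occupy the upper half of the u32 range and thus
// never collide with userspace-chosen txids.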
/// Is peer channel closed?
fn peer_closed(&self) -> bool {
self.peer.strong_count() == 0
}
}
/// The message transferred in the channel.
/// See [Channel](struct.Channel.html) for details.
#[derive(Default)]
pub struct MessagePacket {
/// The transaction id of the message packet
pub txid: TxID,
/// The data carried by the message packet
pub data: Vec<u8>,
/// The handles transferred with the message packet; see [Channel](struct.Channel.html) for details.
pub handles: Vec<Handle>,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_basics() {
let (end0, end1) = Channel::create();
assert!(Arc::ptr_eq(
&end0.peer().unwrap().downcast_arc().unwrap(),
&end1
));
assert_eq!(end0.related_koid(), end1.id());
drop(end1);
assert_eq!(end0.peer().unwrap_err(), ZxError::PEER_CLOSED);
assert_eq!(end0.related_koid(), 0);
}
#[test]
fn read_write() {
let (channel0, channel1) = Channel::create();
// write a message to each other
let txid0 = channel0.new_txid();
channel0
.write(MessagePacket {
txid: txid0,
data: Vec::from("hello 1"),
handles: Vec::new(),
})
.unwrap();
let txid1 = channel1.new_txid();
channel1
.write(MessagePacket {
txid: txid1,
data: Vec::from("hello 0"),
handles: Vec::new(),
})
.unwrap();
// reading the messages should succeed
let recv_msg = channel1.read().unwrap();
assert_eq!(recv_msg.txid, txid0);
assert_eq!(recv_msg.data.as_slice(), b"hello 1");
assert!(recv_msg.handles.is_empty());
let recv_msg = channel0.read().unwrap();
assert_eq!(recv_msg.txid, txid1);
assert_eq!(recv_msg.data.as_slice(), b"hello 0");
assert!(recv_msg.handles.is_empty());
// reading more messages should fail.
assert_eq!(channel0.read().err(), Some(ZxError::SHOULD_WAIT));
assert_eq!(channel1.read().err(), Some(ZxError::SHOULD_WAIT));
}
#[test]
fn peer_closed() {
let (channel0, channel1) = Channel::create();
// write a message from peer, then drop it
channel1.write(MessagePacket::default()).unwrap();
drop(channel1);
// reading the first message should succeed.
channel0.read().unwrap();
// reading more messages should fail.
assert_eq!(channel0.read().err(), Some(ZxError::PEER_CLOSED));
// writing a message should fail.
assert_eq!(
channel0.write(MessagePacket::default()),
Err(ZxError::PEER_CLOSED)
);
}
}

@@ -0,0 +1,4 @@
use super::*;
mod channel;
pub use self::channel::*;

@@ -0,0 +1,23 @@
#![no_std]
#![deny(unused_imports)]
#![allow(dead_code)]
#![feature(get_mut_unchecked)]
#![feature(drain_filter)]
extern crate alloc;
#[cfg(test)]
#[macro_use]
extern crate std;
#[macro_use]
extern crate log;
mod error;
mod ipc;
mod object;
mod task;
mod util;
mod vm;
pub use self::error::*;

@@ -0,0 +1,21 @@
// ANCHOR: handle
use super::{KernelObject, Rights};
use alloc::sync::Arc;
pub type HandleValue = u32;
pub const INVALID_HANDLE: HandleValue = 0;
/// A handle to a kernel object
#[derive(Clone)]
pub struct Handle {
pub object: Arc<dyn KernelObject>,
pub rights: Rights,
}
impl Handle {
/// Create a new handle
pub fn new(object: Arc<dyn KernelObject>, rights: Rights) -> Self {
Handle { object, rights }
}
}
// ANCHOR_END: handle

@@ -0,0 +1,185 @@
use alloc::string::String;
use alloc::sync::Arc;
use core::fmt::Debug;
use core::sync::atomic::*;
use downcast_rs::{impl_downcast, DowncastSync};
use spin::Mutex;
mod handle;
mod rights;
pub use self::handle::*;
pub use self::rights::*;
pub use super::*;
/// The common interface of kernel objects
pub trait KernelObject: DowncastSync + Debug {
/// Get the object's KoID
fn id(&self) -> KoID;
/// Get the type name of the object
fn type_name(&self) -> &str;
/// Get the name of the object
fn name(&self) -> String;
/// Set the name of the object
fn set_name(&self, name: &str);
/// Attempt to get the object's peer.
///
/// Currently the object must be a `Channel`.
fn peer(&self) -> ZxResult<Arc<dyn KernelObject>> {
Err(ZxError::NOT_SUPPORTED)
}
/// Attempt to get the KoID of the related object, or return 0 if there is none.
///
/// Currently the object must be a `Channel` or a `Task`.
///
/// If the object is a `Channel`, this returns the peer's KoID.
///
/// If the object is a `Task`, this returns the KoID of its parent `Task`.
fn related_koid(&self) -> KoID {
0
}
/// Attempt to get the child object with KoID `id`.
///
/// Currently the object must be a `Job` or a `Process`.
///
/// If the object is a `Job`, both its direct child `Job`s and its `Process`es can be retrieved.
fn get_child(&self, _id: KoID) -> ZxResult<Arc<dyn KernelObject>> {
Err(ZxError::WRONG_TYPE)
}
}
impl_downcast!(sync KernelObject);
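// `impl_downcast!` (from the downcast-rs crate) generates the `downcast_arc`
// method used below to recover the concrete type (e.g. `Arc<Process>`) from
// an `Arc<dyn KernelObject>`.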
/// The type of kernel object IDs
pub type KoID = u64;
/// The core structure embedded in every kernel object
pub struct KObjectBase {
/// The object's KoID
pub id: KoID,
inner: Mutex<KObjectBaseInner>,
}
/// The mutable inner part of `KObjectBase`
#[derive(Default)]
struct KObjectBaseInner {
name: String,
}
impl Default for KObjectBase {
/// Create a new `KObjectBase`
fn default() -> Self {
KObjectBase {
id: Self::new_koid(),
inner: Default::default(),
}
}
}
impl KObjectBase {
/// Create a new kernel object base.
pub fn new() -> Self {
Self::default()
}
/// Generate a unique KoID
fn new_koid() -> KoID {
static NEXT_KOID: AtomicU64 = AtomicU64::new(1024);
NEXT_KOID.fetch_add(1, Ordering::SeqCst)
}
/// Get the name of the object
pub fn name(&self) -> String {
self.inner.lock().name.clone()
}
/// Set the name of the object
pub fn set_name(&self, name: &str) {
self.inner.lock().name = String::from(name);
}
/// Create a kernel object base with `name`.
pub fn with_name(name: &str) -> Self {
KObjectBase {
id: Self::new_koid(),
inner: Mutex::new(KObjectBaseInner {
name: String::from(name),
}),
}
}
}
/// A macro that automatically implements the `KernelObject` trait for a kernel object struct.
#[macro_export] // export the macro so it can be used outside this crate
macro_rules! impl_kobject {
// match the type name, plus optional functions overriding the default implementations
($class:ident $( $fn:tt )*) => {
// implement the KernelObject trait for the object; methods forward to the inner struct
impl KernelObject for $class {
fn id(&self) -> KoID {
// directly access the inner pub field
self.base.id
}
fn type_name(&self) -> &str {
// use the stringify! macro to turn the input into a string
stringify!($class)
}
// note: types inside a macro must be written with full paths, e.g. alloc::string::String
fn name(&self) -> alloc::string::String {
self.base.name()
}
fn set_name(&self, name: &str) {
// directly call the inner pub method
self.base.set_name(name)
}
// any number of functions may be passed in to override the trait's default implementations
$( $fn )*
}
// implement the Debug trait for the object
impl core::fmt::Debug for $class {
fn fmt(
&self,
f: &mut core::fmt::Formatter<'_>,
) -> core::result::Result<(), core::fmt::Error> {
// print the object's type, ID, and name
f.debug_tuple(&stringify!($class))
.field(&self.id())
.field(&self.name())
.finish()
}
}
};
}
/// An empty object, used for testing
pub struct DummyObject {
// it must contain a `KObjectBase` field named `base`
base: KObjectBase,
}
// use the macro above to declare it a kernel object, generating the necessary code
impl_kobject!(DummyObject);
impl DummyObject {
/// Create a new `DummyObject`
pub fn new() -> Arc<Self> {
Arc::new(DummyObject {
base: KObjectBase::default(),
})
}
}
#[cfg(test)]
#[test]
fn impl_kobject() {
use alloc::format;
let dummy = DummyObject::new();
let object: Arc<dyn KernelObject> = dummy;
assert_eq!(object.type_name(), "DummyObject");
assert_eq!(object.name(), "");
object.set_name("dummy");
assert_eq!(object.name(), "dummy");
assert_eq!(
format!("{:?}", object),
format!("DummyObject({}, \"dummy\")", object.id())
);
let _result: Arc<DummyObject> = object.downcast_arc::<DummyObject>().unwrap();
}

@@ -0,0 +1,58 @@
// ANCHOR: rights
use bitflags::bitflags;
bitflags! {
/// Rights of a handle
#[derive(Default)]
pub struct Rights: u32 {
const DUPLICATE = 1 << 0;
const TRANSFER = 1 << 1;
const READ = 1 << 2;
const WRITE = 1 << 3;
const EXECUTE = 1 << 4;
const MAP = 1 << 5;
const GET_PROPERTY = 1 << 6;
const SET_PROPERTY = 1 << 7;
const ENUMERATE = 1 << 8;
const DESTROY = 1 << 9;
const SET_POLICY = 1 << 10;
const GET_POLICY = 1 << 11;
const SIGNAL = 1 << 12;
const SIGNAL_PEER = 1 << 13;
const WAIT = 1 << 14;
const INSPECT = 1 << 15;
const MANAGE_JOB = 1 << 16;
const MANAGE_PROCESS = 1 << 17;
const MANAGE_THREAD = 1 << 18;
const APPLY_PROFILE = 1 << 19;
const SAME_RIGHTS = 1 << 31;
const BASIC = Self::TRANSFER.bits | Self::DUPLICATE.bits | Self::WAIT.bits | Self::INSPECT.bits;
const IO = Self::READ.bits | Self::WRITE.bits;
/// GET_PROPERTY | SET_PROPERTY
const PROPERTY = Self::GET_PROPERTY.bits | Self::SET_PROPERTY.bits;
/// GET_POLICY | SET_POLICY
const POLICY = Self::GET_POLICY.bits | Self::SET_POLICY.bits;
/// BASIC & !Self::DUPLICATE | IO | SIGNAL | SIGNAL_PEER
const DEFAULT_CHANNEL = Self::BASIC.bits & !Self::DUPLICATE.bits | Self::IO.bits | Self::SIGNAL.bits | Self::SIGNAL_PEER.bits;
/// BASIC | IO | PROPERTY | ENUMERATE | DESTROY | SIGNAL | MANAGE_PROCESS | MANAGE_THREAD
const DEFAULT_PROCESS = Self::BASIC.bits | Self::IO.bits | Self::PROPERTY.bits | Self::ENUMERATE.bits | Self::DESTROY.bits
| Self::SIGNAL.bits | Self::MANAGE_PROCESS.bits | Self::MANAGE_THREAD.bits;
/// BASIC | IO | PROPERTY | DESTROY | SIGNAL | MANAGE_THREAD
const DEFAULT_THREAD = Self::BASIC.bits | Self::IO.bits | Self::PROPERTY.bits | Self::DESTROY.bits | Self::SIGNAL.bits | Self::MANAGE_THREAD.bits;
/// BASIC & !WAIT
const DEFAULT_VMAR = Self::BASIC.bits & !Self::WAIT.bits;
/// BASIC | IO | PROPERTY | POLICY | ENUMERATE | DESTROY | SIGNAL | MANAGE_JOB | MANAGE_PROCESS | MANAGE_THREAD
const DEFAULT_JOB = Self::BASIC.bits | Self::IO.bits | Self::PROPERTY.bits | Self::POLICY.bits | Self::ENUMERATE.bits
| Self::DESTROY.bits | Self::SIGNAL.bits | Self::MANAGE_JOB.bits | Self::MANAGE_PROCESS.bits | Self::MANAGE_THREAD.bits;
}
}
// ANCHOR_END: rights
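// A typical rights check, as done in `Process::get_object_with_rights`:
//
//     if !handle.rights.contains(desired_rights) {
//         return Err(ZxError::ACCESS_DENIED);
//     }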

@@ -0,0 +1,409 @@
use {
super::job_policy::*,
super::process::Process,
super::*,
crate::error::*,
crate::object::*,
crate::task::Task,
alloc::sync::{Arc, Weak},
alloc::vec::Vec,
spin::Mutex,
};
/// The Job object
#[allow(dead_code)]
pub struct Job {
base: KObjectBase,
parent: Option<Arc<Job>>,
parent_policy: JobPolicy,
inner: Mutex<JobInner>,
}
impl_kobject!(Job
fn get_child(&self, id: KoID) -> ZxResult<Arc<dyn KernelObject>> {
let inner = self.inner.lock();
if let Some(job) = inner.children.iter().filter_map(|o|o.upgrade()).find(|o| o.id() == id) {
return Ok(job);
}
if let Some(proc) = inner.processes.iter().find(|o| o.id() == id) {
return Ok(proc.clone());
}
Err(ZxError::NOT_FOUND)
}
fn related_koid(&self) -> KoID {
self.parent.as_ref().map(|p| p.id()).unwrap_or(0)
}
);
#[derive(Default)]
struct JobInner {
policy: JobPolicy,
children: Vec<Weak<Job>>,
processes: Vec<Arc<Process>>,
// once the job is killed, creating new children must fail
killed: bool,
self_ref: Weak<Job>,
}
impl Job {
/// Create the root job.
pub fn root() -> Arc<Self> {
let job = Arc::new(Job {
base: KObjectBase::new(),
parent: None,
parent_policy: JobPolicy::default(),
inner: Mutex::new(JobInner::default()),
});
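// Two-phase initialization: create the `Arc` first, then store the weak
// self-reference through the `Mutex` (no unsafe needed). `self_ref` lets
// `remove_child` later identify this job in its parent's list by pointer.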
job.inner.lock().self_ref = Arc::downgrade(&job);
job
}
/// Create a new child job object.
pub fn create_child(self: &Arc<Self>) -> ZxResult<Arc<Self>> {
let mut inner = self.inner.lock();
if inner.killed {
return Err(ZxError::BAD_STATE);
}
let child = Arc::new(Job {
base: KObjectBase::new(),
parent: Some(self.clone()),
parent_policy: inner.policy.merge(&self.parent_policy),
inner: Mutex::new(JobInner::default()),
});
let child_weak = Arc::downgrade(&child);
child.inner.lock().self_ref = child_weak.clone();
inner.children.push(child_weak);
Ok(child)
}
fn remove_child(&self, to_remove: &Weak<Job>) {
let mut inner = self.inner.lock();
inner.children.retain(|child| !to_remove.ptr_eq(child));
if inner.killed && inner.processes.is_empty() && inner.children.is_empty() {
drop(inner);
self.terminate()
}
}
/// Get the policy of the job.
pub fn policy(&self) -> JobPolicy {
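// The effective policy merges this job's own policy with the inherited
// `parent_policy`; on conflict the parent's action wins (see `JobPolicy::merge`).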
self.inner.lock().policy.merge(&self.parent_policy)
}
/// Get the parent job.
pub fn parent(&self) -> Option<Arc<Self>> {
self.parent.clone()
}
/// Sets one or more security and/or resource policies to an empty job.
///
/// The job's effective policy is the combination of the parent's
/// effective policy and the policies specified in `policy`.
///
/// After this call succeeds any new child process or child job will have
/// the new effective policy applied to it.
pub fn set_policy_basic(
&self,
options: SetPolicyOptions,
policies: &[BasicPolicy],
) -> ZxResult {
let mut inner = self.inner.lock();
if !inner.is_empty() {
return Err(ZxError::BAD_STATE);
}
for policy in policies {
if self.parent_policy.get_action(policy.condition).is_some() {
match options {
SetPolicyOptions::Absolute => return Err(ZxError::ALREADY_EXISTS),
SetPolicyOptions::Relative => {}
}
} else {
inner.policy.apply(*policy);
}
}
Ok(())
}
/// Add a process to the job.
pub(super) fn add_process(&self, process: Arc<Process>) -> ZxResult {
let mut inner = self.inner.lock();
if inner.killed {
return Err(ZxError::BAD_STATE);
}
inner.processes.push(process);
Ok(())
}
/// Remove a process from the job.
pub(super) fn remove_process(&self, id: KoID) {
let mut inner = self.inner.lock();
inner.processes.retain(|proc| proc.id() != id);
if inner.killed && inner.processes.is_empty() && inner.children.is_empty() {
drop(inner);
self.terminate()
}
}
/// Check whether this job is the root job.
pub fn check_root_job(&self) -> ZxResult {
if self.parent.is_some() {
Err(ZxError::ACCESS_DENIED)
} else {
Ok(())
}
}
/// Get KoIDs of Processes.
pub fn process_ids(&self) -> Vec<KoID> {
self.inner.lock().processes.iter().map(|p| p.id()).collect()
}
/// Get KoIDs of children Jobs.
pub fn children_ids(&self) -> Vec<KoID> {
self.inner
.lock()
.children
.iter()
.filter_map(|j| j.upgrade())
.map(|j| j.id())
.collect()
}
/// Return true if this job has no processes and no child jobs.
pub fn is_empty(&self) -> bool {
self.inner.lock().is_empty()
}
/// The job finally terminates.
fn terminate(&self) {
if let Some(parent) = self.parent.as_ref() {
parent.remove_child(&self.inner.lock().self_ref)
}
}
}
impl Task for Job {
/// Kill the job. The job does not terminate immediately when killed.
/// It terminates after all of its children and processes are terminated.
fn kill(&self) {
let (children, processes) = {
let mut inner = self.inner.lock();
if inner.killed {
return;
}
inner.killed = true;
(inner.children.clone(), inner.processes.clone())
};
if children.is_empty() && processes.is_empty() {
self.terminate();
return;
}
for child in children {
if let Some(child) = child.upgrade() {
child.kill();
}
}
for proc in processes {
proc.kill();
}
}
fn suspend(&self) {
panic!("job do not support suspend");
}
fn resume(&self) {
panic!("job do not support resume");
}
}
impl JobInner {
fn is_empty(&self) -> bool {
self.processes.is_empty() && self.children.is_empty()
}
}
impl Drop for Job {
fn drop(&mut self) {
self.terminate();
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::task::TASK_RETCODE_SYSCALL_KILL;
#[test]
fn create() {
let root_job = Job::root();
let job = Job::create_child(&root_job).expect("failed to create job");
let child = root_job
.get_child(job.id())
.unwrap()
.downcast_arc()
.unwrap();
assert!(Arc::ptr_eq(&child, &job));
assert_eq!(job.related_koid(), root_job.id());
assert_eq!(root_job.related_koid(), 0);
root_job.kill();
assert_eq!(root_job.create_child().err(), Some(ZxError::BAD_STATE));
}
#[test]
fn set_policy() {
let root_job = Job::root();
// default policy
assert_eq!(
root_job.policy().get_action(PolicyCondition::BadHandle),
None
);
// set policy for root job
let policy = &[BasicPolicy {
condition: PolicyCondition::BadHandle,
action: PolicyAction::Deny,
}];
root_job
.set_policy_basic(SetPolicyOptions::Relative, policy)
.expect("failed to set policy");
assert_eq!(
root_job.policy().get_action(PolicyCondition::BadHandle),
Some(PolicyAction::Deny)
);
// overriding the policy should succeed
let policy = &[BasicPolicy {
condition: PolicyCondition::BadHandle,
action: PolicyAction::Allow,
}];
root_job
.set_policy_basic(SetPolicyOptions::Relative, policy)
.expect("failed to set policy");
assert_eq!(
root_job.policy().get_action(PolicyCondition::BadHandle),
Some(PolicyAction::Allow)
);
// create a child job
let job = Job::create_child(&root_job).expect("failed to create job");
// should inherit parent's policy.
assert_eq!(
job.policy().get_action(PolicyCondition::BadHandle),
Some(PolicyAction::Allow)
);
// setting policy for a non-empty job should fail.
assert_eq!(
root_job.set_policy_basic(SetPolicyOptions::Relative, &[]),
Err(ZxError::BAD_STATE)
);
// setting a new policy should succeed.
let policy = &[BasicPolicy {
condition: PolicyCondition::WrongObject,
action: PolicyAction::Allow,
}];
job.set_policy_basic(SetPolicyOptions::Relative, policy)
.expect("failed to set policy");
assert_eq!(
job.policy().get_action(PolicyCondition::WrongObject),
Some(PolicyAction::Allow)
);
// relatively setting an existing policy should be ignored.
let policy = &[BasicPolicy {
condition: PolicyCondition::BadHandle,
action: PolicyAction::Deny,
}];
job.set_policy_basic(SetPolicyOptions::Relative, policy)
.expect("failed to set policy");
assert_eq!(
job.policy().get_action(PolicyCondition::BadHandle),
Some(PolicyAction::Allow)
);
// absolutely setting an existing policy should fail.
assert_eq!(
job.set_policy_basic(SetPolicyOptions::Absolute, policy),
Err(ZxError::ALREADY_EXISTS)
);
}
#[test]
fn parent_child() {
let root_job = Job::root();
let job = Job::create_child(&root_job).expect("failed to create job");
let proc = Process::create(&root_job, "proc").expect("failed to create process");
assert_eq!(root_job.get_child(job.id()).unwrap().id(), job.id());
assert_eq!(root_job.get_child(proc.id()).unwrap().id(), proc.id());
assert_eq!(
root_job.get_child(root_job.id()).err(),
Some(ZxError::NOT_FOUND)
);
assert!(Arc::ptr_eq(&job.parent().unwrap(), &root_job));
let job1 = root_job.create_child().expect("failed to create job");
let proc1 = Process::create(&root_job, "proc1").expect("failed to create process");
assert_eq!(root_job.children_ids(), vec![job.id(), job1.id()]);
assert_eq!(root_job.process_ids(), vec![proc.id(), proc1.id()]);
root_job.kill();
assert_eq!(root_job.create_child().err(), Some(ZxError::BAD_STATE));
}
#[test]
fn check() {
let root_job = Job::root();
assert!(root_job.is_empty());
let job = root_job.create_child().expect("failed to create job");
assert_eq!(root_job.check_root_job(), Ok(()));
assert_eq!(job.check_root_job(), Err(ZxError::ACCESS_DENIED));
assert!(!root_job.is_empty());
assert!(job.is_empty());
let _proc = Process::create(&job, "proc").expect("failed to create process");
assert!(!job.is_empty());
}
#[test]
fn kill() {
let root_job = Job::root();
let job = Job::create_child(&root_job).expect("failed to create job");
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
let current_thread = CurrentThread(thread.clone());
root_job.kill();
assert!(root_job.inner.lock().killed);
assert!(job.inner.lock().killed);
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
assert_eq!(thread.state(), ThreadState::Dying);
std::mem::drop(current_thread);
assert!(root_job.inner.lock().killed);
assert!(job.inner.lock().killed);
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
assert_eq!(thread.state(), ThreadState::Dead);
// The job has no children.
let root_job = Job::root();
root_job.kill();
assert!(root_job.inner.lock().killed);
// The job's processes have no threads.
let root_job = Job::root();
let job = Job::create_child(&root_job).expect("failed to create job");
let proc = Process::create(&root_job, "proc").expect("failed to create process");
root_job.kill();
assert!(root_job.inner.lock().killed);
assert!(job.inner.lock().killed);
assert_eq!(proc.status(), Status::Exited(TASK_RETCODE_SYSCALL_KILL));
}
}

@@ -0,0 +1,108 @@
/// Security and resource policies of a job.
#[derive(Default, Copy, Clone)]
pub struct JobPolicy {
// TODO: use bitset
action: [Option<PolicyAction>; 15],
}
impl JobPolicy {
/// Get the action of a policy `condition`.
pub fn get_action(&self, condition: PolicyCondition) -> Option<PolicyAction> {
self.action[condition as usize]
}
/// Apply a basic policy.
pub fn apply(&mut self, policy: BasicPolicy) {
self.action[policy.condition as usize] = Some(policy.action);
}
/// Merge the policy with `parent`'s. On conflict, the parent's action wins.
pub fn merge(&self, parent: &Self) -> Self {
let mut new = *self;
for i in 0..15 {
if parent.action[i].is_some() {
new.action[i] = parent.action[i];
}
}
new
}
}
/// Control the effect in the case of conflict between
/// the existing policies and the new policies when setting new policies.
#[derive(Debug, Copy, Clone)]
pub enum SetPolicyOptions {
/// Policy is applied for all conditions in policy or the call fails.
Absolute,
/// Policy is applied for the conditions not specifically overridden by the parent policy.
Relative,
}
/// The policy type.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct BasicPolicy {
/// Condition when the policy is applied.
pub condition: PolicyCondition,
/// Action taken when the condition is met.
pub action: PolicyAction,
}
/// The condition when a policy is applied.
#[repr(u32)]
#[derive(Debug, Copy, Clone)]
pub enum PolicyCondition {
/// A process under this job is attempting to issue a syscall with an invalid handle.
/// In this case, `PolicyAction::Allow` and `PolicyAction::Deny` are equivalent:
/// if the syscall returns, it will always return the error ZX_ERR_BAD_HANDLE.
BadHandle = 0,
/// A process under this job is attempting to issue a syscall with a handle that does not support such operation.
WrongObject = 1,
/// A process under this job is attempting to map an address region with write-execute access.
VmarWx = 2,
/// A special condition that stands for all of the above ZX_NEW conditions
/// such as NEW_VMO, NEW_CHANNEL, NEW_EVENT, NEW_EVENTPAIR, NEW_PORT, NEW_SOCKET, NEW_FIFO,
/// And any future ZX_NEW policy.
/// This will include any new kernel objects which do not require a parent object for creation.
NewAny = 3,
/// A process under this job is attempting to create a new vm object.
NewVMO = 4,
/// A process under this job is attempting to create a new channel.
NewChannel = 5,
/// A process under this job is attempting to create a new event.
NewEvent = 6,
/// A process under this job is attempting to create a new event pair.
NewEventPair = 7,
/// A process under this job is attempting to create a new port.
NewPort = 8,
/// A process under this job is attempting to create a new socket.
NewSocket = 9,
/// A process under this job is attempting to create a new fifo.
NewFIFO = 10,
/// A process under this job is attempting to create a new timer.
NewTimer = 11,
/// A process under this job is attempting to create a new process.
NewProcess = 12,
/// A process under this job is attempting to create a new profile.
NewProfile = 13,
/// A process under this job is attempting to use zx_vmo_replace_as_executable()
/// with a ZX_HANDLE_INVALID as the second argument rather than a valid ZX_RSRC_KIND_VMEX.
AmbientMarkVMOExec = 14,
}
/// The action taken when the condition happens specified by a policy.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PolicyAction {
/// Allow condition.
Allow = 0,
/// Prevent condition.
Deny = 1,
/// Generate an exception via the debug port. An exception generated this
/// way acts as a breakpoint. The thread may be resumed after the exception.
AllowException = 2,
/// Just like `AllowException`, but after resuming condition is denied.
DenyException = 3,
/// Terminate the process.
Kill = 4,
}

@@ -0,0 +1,24 @@
use super::*;
mod job;
mod job_policy;
mod process;
mod thread;
pub use {self::job::*, self::job_policy::*, self::process::*, self::thread::*};
/// Task (Thread, Process, or Job)
pub trait Task: Sync + Send {
/// Kill the task. The task does not terminate immediately when killed.
/// It terminates after all of its children are terminated or after some cleanup is finished.
fn kill(&self);
/// Suspend the task. Currently only thread or process handles may be suspended.
fn suspend(&self);
/// Resume the task
fn resume(&self);
}
/// The return code set when a task is killed via zx_task_kill().
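/// This matches Zircon's `ZX_TASK_RETCODE_SYSCALL_KILL`.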
pub const TASK_RETCODE_SYSCALL_KILL: i64 = -1028;

@@ -0,0 +1,505 @@
use {
super::{job::Job, job_policy::*, thread::*, *},
crate::{error::*, object::*, vm::*},
alloc::{sync::Arc, vec::Vec},
hashbrown::HashMap,
spin::Mutex,
};
pub struct Process {
base: KObjectBase,
job: Arc<Job>,
policy: JobPolicy,
vmar: Arc<VmAddressRegion>,
inner: Mutex<ProcessInner>,
}
impl_kobject!(Process
fn get_child(&self, id: KoID) -> ZxResult<Arc<dyn KernelObject>> {
let inner = self.inner.lock();
let thread = inner.threads.iter().find(|o| o.id() == id).ok_or(ZxError::NOT_FOUND)?;
Ok(thread.clone())
}
fn related_koid(&self) -> KoID {
self.job.id()
}
);
#[derive(Default)]
struct ProcessInner {
max_handle_id: u32,
status: Status,
handles: HashMap<HandleValue, Handle>,
threads: Vec<Arc<Thread>>,
}
/// Status of a process.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Status {
/// Initial state, no thread present in process.
Init,
/// First thread has started and is running.
Running,
/// Process has exited with the code.
Exited(i64),
}
impl Default for Status {
fn default() -> Self {
Status::Init
}
}
impl Process {
/// Create a new process in the `job`.
pub fn create(job: &Arc<Job>, name: &str) -> ZxResult<Arc<Self>> {
let proc = Arc::new(Process {
base: KObjectBase::with_name(name),
job: job.clone(),
policy: job.policy(),
vmar: VmAddressRegion::new_root(),
inner: Mutex::new(ProcessInner::default()),
});
job.add_process(proc.clone())?;
Ok(proc)
}
/// Get a handle from the process.
fn get_handle(&self, handle_value: HandleValue) -> ZxResult<Handle> {
self.inner.lock().get_handle(handle_value)
}
/// Add a new handle to the process.
pub fn add_handle(&self, handle: Handle) -> HandleValue {
self.inner.lock().add_handle(handle)
}
/// Remove a handle from the process.
pub fn remove_handle(&self, handle_value: HandleValue) -> ZxResult<Handle> {
self.inner.lock().remove_handle(handle_value)
}
/// Add all handles to the process
pub fn add_handles(&self, handles: Vec<Handle>) -> Vec<HandleValue> {
let mut inner = self.inner.lock();
handles.into_iter().map(|h| inner.add_handle(h)).collect()
}
/// Remove all handles from the process.
pub fn remove_handles(&self, handle_values: &[HandleValue]) -> ZxResult<Vec<Handle>> {
let mut inner = self.inner.lock();
handle_values
.iter()
.map(|h| inner.remove_handle(*h))
.collect()
}
/// Get the kernel object corresponding to this `handle_value`
pub fn get_object<T: KernelObject>(&self, handle_value: HandleValue) -> ZxResult<Arc<T>> {
let handle = self.get_handle(handle_value)?;
let object = handle
.object
.downcast_arc::<T>()
.map_err(|_| ZxError::WRONG_TYPE)?;
Ok(object)
}
/// Look up the kernel object by handle value and check its rights
pub fn get_object_with_rights<T: KernelObject>(
&self,
handle_value: HandleValue,
desired_rights: Rights,
) -> ZxResult<Arc<T>> {
let handle = self.get_handle(handle_value)?;
// check type before rights
let object = handle
.object
.downcast_arc::<T>()
.map_err(|_| ZxError::WRONG_TYPE)?;
if !handle.rights.contains(desired_rights) {
return Err(ZxError::ACCESS_DENIED);
}
Ok(object)
}
/// Get the kernel object corresponding to this `handle_value` and this handle's rights.
pub fn get_object_and_rights<T: KernelObject>(
&self,
handle_value: HandleValue,
) -> ZxResult<(Arc<T>, Rights)> {
let handle = self.get_handle(handle_value)?;
let object = handle
.object
.downcast_arc::<T>()
.map_err(|_| ZxError::WRONG_TYPE)?;
Ok((object, handle.rights))
}
/// Remove a handle referring to a kernel object of the given type from the process.
pub fn remove_object<T: KernelObject>(&self, handle_value: HandleValue) -> ZxResult<Arc<T>> {
let handle = self.remove_handle(handle_value)?;
let object = handle
.object
.downcast_arc::<T>()
.map_err(|_| ZxError::WRONG_TYPE)?;
Ok(object)
}
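/// Start the process: mark it `Running` and start `thread` as its first
/// thread, passing `arg1` (inserted into the handle table) and `arg2` to it.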
pub fn start(
&self,
thread: &Arc<Thread>,
entry: usize,
stack: usize,
arg1: Option<Handle>,
arg2: usize,
thread_fn: ThreadFn,
) -> ZxResult {
let handle_value;
{
let mut inner = self.inner.lock();
if !inner.contains_thread(thread) {
return Err(ZxError::ACCESS_DENIED);
}
if inner.status != Status::Init {
return Err(ZxError::BAD_STATE);
}
inner.status = Status::Running;
handle_value = arg1.map_or(INVALID_HANDLE, |handle| inner.add_handle(handle));
}
thread.set_first_thread();
match thread.start(entry, stack, handle_value as usize, arg2, thread_fn) {
Ok(_) => Ok(()),
Err(err) => {
let mut inner = self.inner.lock();
if handle_value != INVALID_HANDLE {
inner.remove_handle(handle_value).ok();
}
Err(err)
}
}
}
/// Exit the current process with `retcode`.
/// The process does not terminate immediately when exited.
/// It terminates after all of its child threads are terminated.
pub fn exit(&self, retcode: i64) {
let mut inner = self.inner.lock();
if let Status::Exited(_) = inner.status {
return;
}
inner.status = Status::Exited(retcode);
if inner.threads.is_empty() {
inner.handles.clear();
drop(inner);
self.terminate();
return;
}
for thread in inner.threads.iter() {
thread.kill();
}
inner.handles.clear();
}
/// The process finally terminates.
fn terminate(&self) {
let mut inner = self.inner.lock();
let _retcode = match inner.status {
Status::Exited(retcode) => retcode,
_ => {
inner.status = Status::Exited(0);
0
}
};
self.job.remove_process(self.base.id);
}
/// Check whether `condition` is allowed in the parent job's policy.
pub fn check_policy(&self, condition: PolicyCondition) -> ZxResult {
match self
.policy
.get_action(condition)
.unwrap_or(PolicyAction::Allow)
{
PolicyAction::Allow => Ok(()),
PolicyAction::Deny => Err(ZxError::ACCESS_DENIED),
_ => unimplemented!(),
}
}
/// Get process status.
pub fn status(&self) -> Status {
self.inner.lock().status
}
/// Get the `VmAddressRegion` of the process.
pub fn vmar(&self) -> Arc<VmAddressRegion> {
self.vmar.clone()
}
/// Get the job of the process.
pub fn job(&self) -> Arc<Job> {
self.job.clone()
}
/// Add a thread to the process.
pub(super) fn add_thread(&self, thread: Arc<Thread>) -> ZxResult {
let mut inner = self.inner.lock();
if let Status::Exited(_) = inner.status {
return Err(ZxError::BAD_STATE);
}
inner.threads.push(thread);
Ok(())
}
/// Remove a thread from the process.
///
/// If no more threads left, exit the process.
pub(super) fn remove_thread(&self, tid: KoID) {
let mut inner = self.inner.lock();
inner.threads.retain(|t| t.id() != tid);
if inner.threads.is_empty() {
drop(inner);
self.terminate();
}
}
/// Get KoIDs of Threads.
pub fn thread_ids(&self) -> Vec<KoID> {
self.inner.lock().threads.iter().map(|t| t.id()).collect()
}
/// Get information of this process.
pub fn get_info(&self) -> ProcessInfo {
let mut info = ProcessInfo::default();
match self.inner.lock().status {
Status::Init => {
info.started = false;
info.has_exited = false;
}
Status::Running => {
info.started = true;
info.has_exited = false;
}
Status::Exited(ret) => {
info.return_code = ret;
info.has_exited = true;
info.started = true;
}
}
info
}
}
/// Information of a process.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Default)]
pub struct ProcessInfo {
pub return_code: i64,
pub started: bool,
pub has_exited: bool,
}
impl Task for Process {
fn kill(&self) {
self.exit(TASK_RETCODE_SYSCALL_KILL);
}
fn suspend(&self) {
let inner = self.inner.lock();
for thread in inner.threads.iter() {
thread.suspend();
}
}
fn resume(&self) {
let inner = self.inner.lock();
for thread in inner.threads.iter() {
thread.resume();
}
}
}
impl ProcessInner {
/// Add a handle to the process
fn add_handle(&mut self, handle: Handle) -> HandleValue {
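// Setting the two low bits mirrors Zircon, where valid userspace handle
// values always have the two least significant bits set; it also guarantees
// the value is never INVALID_HANDLE (0).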
let key = (self.max_handle_id << 2) | 0x3u32;
self.max_handle_id += 1;
self.handles.insert(key, handle);
key
}
fn remove_handle(&mut self, handle_value: HandleValue) -> ZxResult<Handle> {
let handle = self
.handles
.remove(&handle_value)
.ok_or(ZxError::BAD_HANDLE)?;
Ok(handle)
}
fn get_handle(&mut self, handle_value: HandleValue) -> ZxResult<Handle> {
let handle = self.handles.get(&handle_value).ok_or(ZxError::BAD_HANDLE)?;
Ok(handle.clone())
}
/// Whether `thread` is in this process.
fn contains_thread(&self, thread: &Arc<Thread>) -> bool {
self.threads.iter().any(|t| Arc::ptr_eq(t, thread))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
assert_eq!(proc.related_koid(), root_job.id());
assert!(Arc::ptr_eq(&root_job, &proc.job()));
}
#[test]
fn handle() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
let handle_value = proc.add_handle(handle);
// getting the object should succeed
let object: Arc<Process> = proc
.get_object_with_rights(handle_value, Rights::DEFAULT_PROCESS)
.expect("failed to get object");
assert!(Arc::ptr_eq(&object, &proc));
let (object, rights) = proc
.get_object_and_rights::<Process>(handle_value)
.expect("failed to get object");
assert!(Arc::ptr_eq(&object, &proc));
assert_eq!(rights, Rights::DEFAULT_PROCESS);
// getting the object with extra rights should fail.
assert_eq!(
proc.get_object_with_rights::<Process>(handle_value, Rights::MANAGE_JOB)
.err(),
Some(ZxError::ACCESS_DENIED)
);
// getting object with invalid type should fail.
assert_eq!(
proc.get_object_with_rights::<Job>(handle_value, Rights::DEFAULT_PROCESS)
.err(),
Some(ZxError::WRONG_TYPE)
);
proc.remove_handle(handle_value).unwrap();
// getting object with invalid handle should fail.
assert_eq!(
proc.get_object_with_rights::<Process>(handle_value, Rights::DEFAULT_PROCESS)
.err(),
Some(ZxError::BAD_HANDLE)
);
let handle1 = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
let handle2 = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
let handle_values = proc.add_handles(vec![handle1, handle2]);
let object1: Arc<Process> = proc
.get_object_with_rights(handle_values[0], Rights::DEFAULT_PROCESS)
.expect("failed to get object");
assert!(Arc::ptr_eq(&object1, &proc));
proc.remove_handles(&handle_values).unwrap();
assert_eq!(
proc.get_object_with_rights::<Process>(handle_values[0], Rights::DEFAULT_PROCESS)
.err(),
Some(ZxError::BAD_HANDLE)
);
}
#[test]
fn get_child() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
assert_eq!(proc.get_child(thread.id()).unwrap().id(), thread.id());
assert_eq!(proc.get_child(proc.id()).err(), Some(ZxError::NOT_FOUND));
let thread1 = Thread::create(&proc, "thread1").expect("failed to create thread");
assert_eq!(proc.thread_ids(), vec![thread.id(), thread1.id()]);
}
#[test]
fn contains_thread() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
let proc1 = Process::create(&root_job, "proc1").expect("failed to create process");
let thread1 = Thread::create(&proc1, "thread1").expect("failed to create thread");
let inner = proc.inner.lock();
assert!(inner.contains_thread(&thread) && !inner.contains_thread(&thread1));
}
#[test]
fn check_policy() {
let root_job = Job::root();
let policy1 = BasicPolicy {
condition: PolicyCondition::BadHandle,
action: PolicyAction::Allow,
};
let policy2 = BasicPolicy {
condition: PolicyCondition::NewChannel,
action: PolicyAction::Deny,
};
assert!(root_job
.set_policy_basic(SetPolicyOptions::Absolute, &[policy1, policy2])
.is_ok());
let proc = Process::create(&root_job, "proc").expect("failed to create process");
assert!(proc.check_policy(PolicyCondition::BadHandle).is_ok());
assert!(proc.check_policy(PolicyCondition::NewProcess).is_ok());
assert_eq!(
proc.check_policy(PolicyCondition::NewChannel).err(),
Some(ZxError::ACCESS_DENIED)
);
let _job = root_job.create_child().unwrap();
assert_eq!(
root_job
.set_policy_basic(SetPolicyOptions::Absolute, &[policy1, policy2])
.err(),
Some(ZxError::BAD_STATE)
);
}
#[test]
fn exit() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
let info = proc.get_info();
assert!(!info.has_exited && !info.started && info.return_code == 0);
proc.exit(666);
let info = proc.get_info();
assert!(info.has_exited && info.started && info.return_code == 666);
assert_eq!(thread.state(), ThreadState::Dying);
// TODO: when is the thread dead?
assert_eq!(
Thread::create(&proc, "thread1").err(),
Some(ZxError::BAD_STATE)
);
}
}

@ -0,0 +1,567 @@
use {
super::process::Process,
super::*,
crate::object::*,
alloc::{boxed::Box, sync::Arc},
bitflags::bitflags,
core::{
future::Future,
ops::Deref,
pin::Pin,
task::{Context, Poll, Waker},
},
spin::Mutex,
trapframe::UserContext,
};
pub use self::thread_state::*;
mod thread_state;
pub struct Thread {
base: KObjectBase,
proc: Arc<Process>,
inner: Mutex<ThreadInner>,
}
impl_kobject!(Thread
fn related_koid(&self) -> KoID {
self.proc.id()
}
);
#[derive(Default)]
struct ThreadInner {
/// Thread context
///
/// It will be taken away when running this thread.
context: Option<Box<UserContext>>,
/// The number of existing `SuspendToken`.
suspend_count: usize,
/// The waker of task when suspending.
waker: Option<Waker>,
/// Thread state
///
/// NOTE: This field will never be `Suspended`. When the thread is suspended,
/// `suspend_count` is non-zero, and this field holds the state before the suspension.
state: ThreadState,
/// Whether a ProcessStarting exception should be generated at the start of this thread.
first_thread: bool,
/// Whether the thread is killed, so the ThreadExiting exception should not block it.
killed: bool,
/// The time this thread has run on the CPU.
time: u128,
flags: ThreadFlag,
}
impl ThreadInner {
fn state(&self) -> ThreadState {
// Dying > Exception > Suspend > Blocked
if self.suspend_count == 0
|| self.context.is_none()
|| self.state == ThreadState::BlockedException
|| self.state == ThreadState::Dying
|| self.state == ThreadState::Dead
{
self.state
} else {
ThreadState::Suspended
}
}
/// Change state and update signal.
fn change_state(&mut self, state: ThreadState) {
self.state = state;
}
}
bitflags! {
/// Thread flags.
#[derive(Default)]
pub struct ThreadFlag: usize {
/// The thread currently has a VCPU.
const VCPU = 1 << 3;
}
}
/// The type of a new thread function.
pub type ThreadFn = fn(thread: CurrentThread) -> Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
impl Thread {
/// Create a new thread.
pub fn create(proc: &Arc<Process>, name: &str) -> ZxResult<Arc<Self>> {
let thread = Arc::new(Thread {
base: KObjectBase::with_name(name),
proc: proc.clone(),
inner: Mutex::new(ThreadInner {
context: Some(Box::new(UserContext::default())),
..Default::default()
}),
});
proc.add_thread(thread.clone())?;
Ok(thread)
}
/// Get the process.
pub fn proc(&self) -> &Arc<Process> {
&self.proc
}
/// Start execution on the thread.
pub fn start(
self: &Arc<Self>,
entry: usize,
stack: usize,
arg1: usize,
arg2: usize,
thread_fn: ThreadFn,
) -> ZxResult {
{
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general.rip = entry;
context.general.rsp = stack;
context.general.rdi = arg1;
context.general.rsi = arg2;
context.general.rflags |= 0x3202;
inner.change_state(ThreadState::Running);
}
kernel_hal::Thread::spawn(thread_fn(CurrentThread(self.clone())), 0);
Ok(())
}
/// Stop the thread. Internal implementation of `exit` and `kill`.
///
/// The thread does not terminate immediately when stopped; it is just made dying.
/// It will terminate after some cleanups, when `terminate` is called **explicitly** by the upper layer.
fn stop(&self, killed: bool) {
let mut inner = self.inner.lock();
if inner.state == ThreadState::Dead {
return;
}
if killed {
inner.killed = true;
}
if inner.state == ThreadState::Dying {
return;
}
inner.change_state(ThreadState::Dying);
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
/// Read one aspect of thread state.
pub fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
let inner = self.inner.lock();
let state = inner.state();
if state != ThreadState::BlockedException && state != ThreadState::Suspended {
return Err(ZxError::BAD_STATE);
}
let context = inner.context.as_ref().ok_or(ZxError::BAD_STATE)?;
context.read_state(kind, buf)
}
/// Write one aspect of thread state.
pub fn write_state(&self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult {
let mut inner = self.inner.lock();
let state = inner.state();
if state != ThreadState::BlockedException && state != ThreadState::Suspended {
return Err(ZxError::BAD_STATE);
}
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.write_state(kind, buf)
}
/// Get the thread's information.
pub fn get_thread_info(&self) -> ThreadInfo {
let inner = self.inner.lock();
ThreadInfo {
state: inner.state() as u32,
}
}
/// Get the thread state.
pub fn state(&self) -> ThreadState {
self.inner.lock().state()
}
/// Add `time` to the total time this thread has run on the CPU.
pub fn time_add(&self, time: u128) {
self.inner.lock().time += time;
}
/// Get the time this thread has run on the CPU.
pub fn get_time(&self) -> u64 {
self.inner.lock().time as u64
}
/// Set this thread as the first thread of a process.
pub(super) fn set_first_thread(&self) {
self.inner.lock().first_thread = true;
}
/// Whether this thread is the first thread of a process.
pub fn is_first_thread(&self) -> bool {
self.inner.lock().first_thread
}
/// Get the thread's flags.
pub fn flags(&self) -> ThreadFlag {
self.inner.lock().flags
}
/// Apply `f` to the thread's flags.
pub fn update_flags(&self, f: impl FnOnce(&mut ThreadFlag)) {
f(&mut self.inner.lock().flags)
}
/// Set the thread local fsbase register on x86_64.
pub fn set_fsbase(&self, fsbase: usize) -> ZxResult {
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general.fsbase = fsbase;
Ok(())
}
/// Set the thread local gsbase register on x86_64.
pub fn set_gsbase(&self, gsbase: usize) -> ZxResult {
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general.gsbase = gsbase;
Ok(())
}
}
impl Task for Thread {
fn kill(&self) {
self.stop(true)
}
fn suspend(&self) {
let mut inner = self.inner.lock();
inner.suspend_count += 1;
// let state = inner.state;
// inner.change_state(state);
}
fn resume(&self) {
let mut inner = self.inner.lock();
assert_ne!(inner.suspend_count, 0);
inner.suspend_count -= 1;
if inner.suspend_count == 0 {
// let state = inner.state;
// inner.change_state(state);
if let Some(waker) = inner.waker.take() {
waker.wake();
}
}
}
}
/// A handle to current thread.
///
/// This is a wrapper of [`Thread`] that provides additional methods for the thread runner.
/// It can only be obtained from the argument of `thread_fn` in a new thread started by [`Thread::start`].
///
/// It will terminate the current thread on drop.
///
/// [`Thread`]: crate::task::Thread
/// [`Thread::start`]: crate::task::Thread::start
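///
/// A hedged sketch of a typical runner function (the `start` test below does the same):
///
/// ```ignore
/// async fn new_thread(thread: CurrentThread) {
///     let cx = thread.wait_for_run().await; // take away the context to run
///     // ... run user code with `cx` ...
///     thread.end_running(cx); // give the context back
/// } // dropping `thread` terminates the current thread
/// ```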
pub struct CurrentThread(pub(super) Arc<Thread>);
impl Deref for CurrentThread {
type Target = Arc<Thread>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Drop for CurrentThread {
/// Terminate the current running thread.
fn drop(&mut self) {
let mut inner = self.inner.lock();
inner.change_state(ThreadState::Dead);
self.proc().remove_thread(self.base.id);
}
}
impl CurrentThread {
/// Exit the current thread.
///
/// The thread does not terminate immediately when exited; it is just made dying.
/// It will terminate after some cleanups when this struct is dropped.
pub fn exit(&self) {
self.stop(false);
}
/// Wait until the thread is ready to run (not suspended),
/// and then take away its context to run the thread.
pub fn wait_for_run(&self) -> impl Future<Output = Box<UserContext>> {
#[must_use = "wait_for_run does nothing unless polled/`await`-ed"]
struct RunnableChecker {
thread: Arc<Thread>,
}
impl Future for RunnableChecker {
type Output = Box<UserContext>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let mut inner = self.thread.inner.lock();
if inner.state() != ThreadState::Suspended {
// resume: return the context token from the thread object.
// There is no need to call change_state here, since taking away the
// context of a non-suspended thread won't change its state.
Poll::Ready(inner.context.take().unwrap())
} else {
// suspend: put waker into the thread object
inner.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
RunnableChecker {
thread: self.0.clone(),
}
}
/// The thread ends running and takes back the context.
pub fn end_running(&self, context: Box<UserContext>) {
let mut inner = self.inner.lock();
inner.context = Some(context);
// let state = inner.state;
// inner.change_state(state);
}
/// Access saved context of current thread.
///
/// Will panic if the context is not available.
pub fn with_context<T, F>(&self, f: F) -> T
where
F: FnOnce(&mut UserContext) -> T,
{
let mut inner = self.inner.lock();
let cx = inner.context.as_mut().unwrap();
f(cx)
}
}
/// The thread state.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum ThreadState {
/// The thread has been created but it has not started running yet.
New = 0,
/// The thread is running user code normally.
Running = 1,
/// Stopped due to `zx_task_suspend()`.
Suspended = 2,
/// In a syscall or handling an exception.
Blocked = 3,
/// The thread is in the process of being terminated, but it has not been stopped yet.
Dying = 4,
/// The thread has stopped running.
Dead = 5,
/// The thread is stopped in an exception.
BlockedException = 0x103,
/// The thread is stopped in `zx_nanosleep()`.
BlockedSleeping = 0x203,
/// The thread is stopped in `zx_futex_wait()`.
BlockedFutex = 0x303,
/// The thread is stopped in `zx_port_wait()`.
BlockedPort = 0x403,
/// The thread is stopped in `zx_channel_call()`.
BlockedChannel = 0x503,
/// The thread is stopped in `zx_object_wait_one()`.
BlockedWaitOne = 0x603,
/// The thread is stopped in `zx_object_wait_many()`.
BlockedWaitMany = 0x703,
/// The thread is stopped in `zx_interrupt_wait()`.
BlockedInterrupt = 0x803,
/// The thread is stopped waiting on a pager.
BlockedPager = 0x903,
}
impl Default for ThreadState {
fn default() -> Self {
ThreadState::New
}
}
/// The thread information.
#[repr(C)]
pub struct ThreadInfo {
state: u32,
}
#[cfg(test)]
mod tests {
use super::job::Job;
use super::*;
use core::time::Duration;
use kernel_hal::timer_now;
use kernel_hal::GeneralRegs;
#[test]
fn create() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
assert_eq!(thread.flags(), ThreadFlag::empty());
assert_eq!(thread.related_koid(), proc.id());
let child = proc.get_child(thread.id()).unwrap().downcast_arc().unwrap();
assert!(Arc::ptr_eq(&child, &thread));
}
#[async_std::test]
async fn start() {
kernel_hal_unix::init();
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
let thread1 = Thread::create(&proc, "thread1").expect("failed to create thread");
// function for new thread
async fn new_thread(thread: CurrentThread) {
let cx = thread.wait_for_run().await;
assert_eq!(cx.general.rip, 1);
assert_eq!(cx.general.rsp, 4);
assert_eq!(cx.general.rdi, 3);
assert_eq!(cx.general.rsi, 2);
async_std::task::sleep(Duration::from_millis(10)).await;
thread.end_running(cx);
}
// start a new thread
let handle = Handle::new(proc.clone(), Rights::DEFAULT_PROCESS);
proc.start(&thread, 1, 4, Some(handle.clone()), 2, |thread| {
Box::pin(new_thread(thread))
})
.expect("failed to start thread");
// check info and state
let info = proc.get_info();
assert!(info.started && !info.has_exited && info.return_code == 0);
assert_eq!(proc.status(), Status::Running);
assert_eq!(thread.state(), ThreadState::Running);
// start again should fail
assert_eq!(
proc.start(&thread, 1, 4, Some(handle.clone()), 2, |thread| Box::pin(
new_thread(thread)
)),
Err(ZxError::BAD_STATE)
);
// start another thread should fail
assert_eq!(
proc.start(&thread1, 1, 4, Some(handle.clone()), 2, |thread| Box::pin(
new_thread(thread)
)),
Err(ZxError::BAD_STATE)
);
// wait 100ms for the new thread to exit
async_std::task::sleep(core::time::Duration::from_millis(100)).await;
// no other references to `Thread`
assert_eq!(Arc::strong_count(&thread), 1);
assert_eq!(thread.state(), ThreadState::Dead);
}
#[test]
fn info() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
let info = thread.get_thread_info();
assert!(info.state == thread.state() as u32);
}
#[test]
fn read_write_state() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
const SIZE: usize = core::mem::size_of::<GeneralRegs>();
let mut buf = [0; 10];
assert_eq!(
thread.read_state(ThreadStateKind::General, &mut buf).err(),
Some(ZxError::BAD_STATE)
);
assert_eq!(
thread.write_state(ThreadStateKind::General, &buf).err(),
Some(ZxError::BAD_STATE)
);
thread.suspend();
assert_eq!(
thread.read_state(ThreadStateKind::General, &mut buf).err(),
Some(ZxError::BUFFER_TOO_SMALL)
);
assert_eq!(
thread.write_state(ThreadStateKind::General, &buf).err(),
Some(ZxError::BUFFER_TOO_SMALL)
);
let mut buf = [0; SIZE];
assert!(thread
.read_state(ThreadStateKind::General, &mut buf)
.is_ok());
assert!(thread.write_state(ThreadStateKind::General, &buf).is_ok());
// TODO
}
#[async_std::test]
async fn wait_for_run() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
assert_eq!(thread.state(), ThreadState::New);
thread
.start(0, 0, 0, 0, |thread| Box::pin(new_thread(thread)))
.unwrap();
async fn new_thread(thread: CurrentThread) {
assert_eq!(thread.state(), ThreadState::Running);
// without suspend
let context = thread.wait_for_run().await;
thread.end_running(context);
// with suspend
thread.suspend();
thread.suspend();
assert_eq!(thread.state(), ThreadState::Suspended);
async_std::task::spawn({
let thread = (*thread).clone();
async move {
async_std::task::sleep(Duration::from_millis(10)).await;
thread.resume();
async_std::task::sleep(Duration::from_millis(10)).await;
thread.resume();
}
});
let time = timer_now();
let _context = thread.wait_for_run().await;
assert!(timer_now() - time >= Duration::from_millis(20));
}
// FIX ME
// let thread: Arc<dyn KernelObject> = thread;
// thread.wait_signal(Signal::THREAD_TERMINATED).await;
}
#[test]
fn time() {
let root_job = Job::root();
let proc = Process::create(&root_job, "proc").expect("failed to create process");
let thread = Thread::create(&proc, "thread").expect("failed to create thread");
assert_eq!(thread.get_time(), 0);
thread.time_add(10);
assert_eq!(thread.get_time(), 10);
}
}

@ -0,0 +1,64 @@
use crate::{ZxError, ZxResult};
use kernel_hal::UserContext;
use numeric_enum_macro::numeric_enum;
numeric_enum! {
#[repr(u32)]
/// Possible values for "kind" in zx_thread_read_state and zx_thread_write_state.
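///
/// For example (a sketch; `thread` is a `Thread`, see the `read_write_state` test):
///
/// ```ignore
/// let mut buf = [0u8; core::mem::size_of::<GeneralRegs>()];
/// thread.read_state(ThreadStateKind::General, &mut buf)?;
/// thread.write_state(ThreadStateKind::General, &buf)?;
/// ```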
#[allow(missing_docs)]
#[derive(Debug, Copy, Clone)]
pub enum ThreadStateKind {
General = 0,
FS = 6,
GS = 7,
}
}
pub(super) trait ContextExt {
fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize>;
fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult;
}
impl ContextExt for UserContext {
fn read_state(&self, kind: ThreadStateKind, buf: &mut [u8]) -> ZxResult<usize> {
match kind {
ThreadStateKind::General => buf.write_struct(&self.general),
ThreadStateKind::FS => buf.write_struct(&self.general.fsbase),
ThreadStateKind::GS => buf.write_struct(&self.general.gsbase),
}
}
fn write_state(&mut self, kind: ThreadStateKind, buf: &[u8]) -> ZxResult {
match kind {
ThreadStateKind::General => self.general = buf.read_struct()?,
ThreadStateKind::FS => self.general.fsbase = buf.read_struct()?,
ThreadStateKind::GS => self.general.gsbase = buf.read_struct()?,
}
Ok(())
}
}
trait BufExt {
fn read_struct<T>(&self) -> ZxResult<T>;
fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize>;
}
#[allow(unsafe_code)]
impl BufExt for [u8] {
fn read_struct<T>(&self) -> ZxResult<T> {
if self.len() < core::mem::size_of::<T>() {
return Err(ZxError::BUFFER_TOO_SMALL);
}
// The byte buffer is not guaranteed to be aligned for `T`, so read unaligned.
Ok(unsafe { (self.as_ptr() as *const T).read_unaligned() })
}
fn write_struct<T: Copy>(&mut self, value: &T) -> ZxResult<usize> {
if self.len() < core::mem::size_of::<T>() {
return Err(ZxError::BUFFER_TOO_SMALL);
}
// As in `read_struct`, don't assume the buffer is aligned for `T`.
unsafe {
(self.as_mut_ptr() as *mut T).write_unaligned(*value);
}
Ok(core::mem::size_of::<T>())
}
}

@ -0,0 +1,102 @@
#![allow(dead_code)]
/// Given a byte range, iterates over the sub-range that falls within each block.
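///
/// For example, iterating the byte range `0x123..0x2018` with 4 KiB blocks
/// (`block_size_log2 == 12`) yields three sub-ranges, as in the unit test below:
///
/// ```ignore
/// let iter = BlockIter { begin: 0x123, end: 0x2018, block_size_log2: 12 };
/// for range in iter {
///     // `range.block` is the block index; `range.begin..range.end` is the
///     // sub-range of offsets inside that block.
///     println!("block {}: {:#x}..{:#x}", range.block, range.begin, range.end);
/// }
/// ```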
pub struct BlockIter {
pub begin: usize,
pub end: usize,
pub block_size_log2: u8,
}
#[derive(Debug, Eq, PartialEq)]
pub struct BlockRange {
pub block: usize,
pub begin: usize,
pub end: usize,
pub block_size_log2: u8,
}
impl BlockRange {
pub fn len(&self) -> usize {
self.end - self.begin
}
pub fn is_full(&self) -> bool {
self.len() == (1usize << self.block_size_log2)
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn origin_begin(&self) -> usize {
(self.block << self.block_size_log2) + self.begin
}
pub fn origin_end(&self) -> usize {
(self.block << self.block_size_log2) + self.end
}
}
impl Iterator for BlockIter {
type Item = BlockRange;
fn next(&mut self) -> Option<<Self as Iterator>::Item> {
if self.begin >= self.end {
return None;
}
let block_size_log2 = self.block_size_log2;
let block_size = 1usize << self.block_size_log2;
let block = self.begin / block_size;
let begin = self.begin % block_size;
let end = if block == self.end / block_size {
self.end % block_size
} else {
block_size
};
self.begin += end - begin;
Some(BlockRange {
block,
begin,
end,
block_size_log2,
})
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn block_iter() {
let mut iter = BlockIter {
begin: 0x123,
end: 0x2018,
block_size_log2: 12,
};
assert_eq!(
iter.next(),
Some(BlockRange {
block: 0,
begin: 0x123,
end: 0x1000,
block_size_log2: 12
})
);
assert_eq!(
iter.next(),
Some(BlockRange {
block: 1,
begin: 0,
end: 0x1000,
block_size_log2: 12
})
);
assert_eq!(
iter.next(),
Some(BlockRange {
block: 2,
begin: 0,
end: 0x18,
block_size_log2: 12
})
);
assert_eq!(iter.next(), None);
}
}

@ -0,0 +1,52 @@
//! Objects for Virtual Memory Management.
mod vmar;
mod vmo;
pub use self::{vmar::*, vmo::*};
/// Physical Address
pub type PhysAddr = usize;
/// Virtual Address
pub type VirtAddr = usize;
/// Device Address
pub type DevVAddr = usize;
/// Size of a page
pub const PAGE_SIZE: usize = 0x1000;
/// log2(PAGE_SIZE)
pub const PAGE_SIZE_LOG2: usize = 12;
/// Check whether `x` is a multiple of `PAGE_SIZE`.
pub fn page_aligned(x: usize) -> bool {
check_aligned(x, PAGE_SIZE)
}
/// Check whether `x` is a multiple of `align`.
pub fn check_aligned(x: usize, align: usize) -> bool {
x % align == 0
}
/// How many pages `size` needs.
/// Uses a wrapping add to avoid overflow (and to pass more unit tests).
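///
/// For example:
///
/// ```ignore
/// assert_eq!(pages(1), 1);
/// assert_eq!(pages(PAGE_SIZE), 1);
/// assert_eq!(pages(PAGE_SIZE + 1), 2);
/// ```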
pub fn pages(size: usize) -> usize {
ceil(size, PAGE_SIZE)
}
/// How many `align`-sized units `x` needs, rounding up.
pub fn ceil(x: usize, align: usize) -> usize {
x.wrapping_add(align - 1) / align
}
/// Round up `size` to a multiple of `PAGE_SIZE`.
pub fn roundup_pages(size: usize) -> usize {
pages(size) * PAGE_SIZE
}
/// Round down `size` to a multiple of `PAGE_SIZE`.
pub fn round_down_pages(size: usize) -> usize {
size / PAGE_SIZE * PAGE_SIZE
}

@ -0,0 +1,849 @@
use {
super::*,
crate::object::*,
alloc::sync::Arc,
alloc::vec,
alloc::vec::Vec,
bitflags::bitflags,
kernel_hal::{MMUFlags, PageTableTrait},
spin::Mutex,
};
bitflags! {
/// Creation flags for VmAddressRegion.
pub struct VmarFlags: u32 {
#[allow(clippy::identity_op)]
/// When randomly allocating subregions, reduce sprawl by placing allocations
/// near each other.
const COMPACT = 1 << 0;
/// Request that the new region be at the specified offset in its parent region.
const SPECIFIC = 1 << 1;
/// Like SPECIFIC, but permits overwriting existing mappings. This
/// flag will not overwrite through a subregion.
const SPECIFIC_OVERWRITE = 1 << 2;
/// Allow VmMappings to be created inside the new region with the SPECIFIC or
/// OFFSET_IS_UPPER_LIMIT flag.
const CAN_MAP_SPECIFIC = 1 << 3;
/// Allow VmMappings to be created inside the region with read permissions.
const CAN_MAP_READ = 1 << 4;
/// Allow VmMappings to be created inside the region with write permissions.
const CAN_MAP_WRITE = 1 << 5;
/// Allow VmMappings to be created inside the region with execute permissions.
const CAN_MAP_EXECUTE = 1 << 6;
/// Require that VMO backing the mapping is non-resizable.
const REQUIRE_NON_RESIZABLE = 1 << 7;
/// Allow mappings inside the region that may generate faults (e.g. pager-backed VMOs).
const ALLOW_FAULTS = 1 << 8;
/// Allow VmMappings to be created inside the region with read, write and execute permissions.
const CAN_MAP_RXW = Self::CAN_MAP_READ.bits | Self::CAN_MAP_EXECUTE.bits | Self::CAN_MAP_WRITE.bits;
/// Creation flags for root VmAddressRegion
const ROOT_FLAGS = Self::CAN_MAP_RXW.bits | Self::CAN_MAP_SPECIFIC.bits;
}
}
/// Virtual Memory Address Regions
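///
/// A hedged usage sketch (mirroring the unit tests below):
///
/// ```ignore
/// let root = VmAddressRegion::new_root();
/// let child = root.allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)?;
/// let vmo = VmObject::new_paged(1);
/// let vaddr = child.map_at(0, vmo, 0, 0x1000, MMUFlags::READ | MMUFlags::WRITE)?;
/// ```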
pub struct VmAddressRegion {
flags: VmarFlags,
base: KObjectBase,
addr: VirtAddr,
size: usize,
parent: Option<Arc<VmAddressRegion>>,
page_table: Arc<Mutex<dyn PageTableTrait>>,
/// If inner is None, this region is destroyed, all operations are invalid.
inner: Mutex<Option<VmarInner>>,
}
impl_kobject!(VmAddressRegion);
/// The mutable part of `VmAddressRegion`.
#[derive(Default)]
struct VmarInner {
children: Vec<Arc<VmAddressRegion>>,
mappings: Vec<Arc<VmMapping>>,
}
impl VmAddressRegion {
/// Create a new root VMAR.
pub fn new_root() -> Arc<Self> {
let (addr, size) = {
use core::sync::atomic::*;
static VMAR_ID: AtomicUsize = AtomicUsize::new(0);
let i = VMAR_ID.fetch_add(1, Ordering::SeqCst);
(0x2_0000_0000 + 0x100_0000_0000 * i, 0x100_0000_0000)
};
Arc::new(VmAddressRegion {
flags: VmarFlags::ROOT_FLAGS,
base: KObjectBase::new(),
addr,
size,
parent: None,
page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())), //hal PageTable
inner: Mutex::new(Some(VmarInner::default())),
})
}
/// Create a kernel root VMAR.
pub fn new_kernel() -> Arc<Self> {
let kernel_vmar_base = KERNEL_ASPACE_BASE as usize;
let kernel_vmar_size = KERNEL_ASPACE_SIZE as usize;
Arc::new(VmAddressRegion {
flags: VmarFlags::ROOT_FLAGS,
base: KObjectBase::new(),
addr: kernel_vmar_base,
size: kernel_vmar_size,
parent: None,
page_table: Arc::new(Mutex::new(kernel_hal::PageTable::new())),
inner: Mutex::new(Some(VmarInner::default())),
})
}
/// Create a child VMAR at the `offset`.
pub fn allocate_at(
self: &Arc<Self>,
offset: usize,
len: usize,
flags: VmarFlags,
align: usize,
) -> ZxResult<Arc<Self>> {
self.allocate(Some(offset), len, flags, align)
}
/// Create a child VMAR with optional `offset`.
pub fn allocate(
self: &Arc<Self>,
offset: Option<usize>,
len: usize,
flags: VmarFlags,
align: usize,
) -> ZxResult<Arc<Self>> {
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
let offset = self.determine_offset(inner, offset, len, align)?;
let child = Arc::new(VmAddressRegion {
flags,
base: KObjectBase::new(),
addr: self.addr + offset,
size: len,
parent: Some(self.clone()),
page_table: self.page_table.clone(),
inner: Mutex::new(Some(VmarInner::default())),
});
inner.children.push(child.clone());
Ok(child)
}
/// Map the `vmo` into this VMAR at given `offset`.
pub fn map_at(
&self,
vmar_offset: usize,
vmo: Arc<VmObject>,
vmo_offset: usize,
len: usize,
flags: MMUFlags,
) -> ZxResult<VirtAddr> {
self.map(Some(vmar_offset), vmo, vmo_offset, len, flags)
}
/// Map the `vmo` into this VMAR.
pub fn map(
&self,
vmar_offset: Option<usize>,
vmo: Arc<VmObject>,
vmo_offset: usize,
len: usize,
flags: MMUFlags,
) -> ZxResult<VirtAddr> {
self.map_ext(
vmar_offset,
vmo,
vmo_offset,
len,
MMUFlags::RXW,
flags,
false,
true,
)
}
/// Map the `vmo` into this VMAR.
#[allow(clippy::too_many_arguments)]
pub fn map_ext(
&self,
vmar_offset: Option<usize>,
vmo: Arc<VmObject>,
vmo_offset: usize,
len: usize,
permissions: MMUFlags,
flags: MMUFlags,
overwrite: bool,
map_range: bool,
) -> ZxResult<VirtAddr> {
if !page_aligned(vmo_offset) || !page_aligned(len) || vmo_offset.overflowing_add(len).1 {
return Err(ZxError::INVALID_ARGS);
}
if !permissions.contains(flags & MMUFlags::RXW) {
return Err(ZxError::ACCESS_DENIED);
}
if vmo_offset > vmo.len() || len > vmo.len() - vmo_offset {
return Err(ZxError::INVALID_ARGS);
}
// Simplify: overwrite == false && map_range == true
if overwrite || !map_range {
warn!("Simplify: overwrite == false && map_range == true");
return Err(ZxError::INVALID_ARGS);
}
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
let offset = self.determine_offset(inner, vmar_offset, len, PAGE_SIZE)?;
let addr = self.addr + offset;
let flags = flags | MMUFlags::from_bits_truncate(vmo.cache_policy() as u32 as usize);
// TODO: support alignments other than PAGE_SIZE (1K? 2K? 4K? 8K? ...)
if !self.test_map(inner, offset, len, PAGE_SIZE) {
return Err(ZxError::NO_MEMORY);
}
let mapping = VmMapping::new(
addr,
len,
vmo,
vmo_offset,
permissions,
flags,
self.page_table.clone(),
);
mapping.map()?;
inner.mappings.push(mapping);
Ok(addr)
}
/// Unmap all VMO mappings and destroy all sub-regions within the absolute range
/// starting at `addr` (inclusive) and ending at `addr + len` (exclusive).
/// Any sub-region that is in the range must be fully in the range
/// (i.e. partial overlaps are an error).
/// NOT SUPPORTED:
/// If a mapping is only partially in the range, the mapping is split and the requested
/// portion is unmapped.
pub fn unmap(&self, addr: VirtAddr, len: usize) -> ZxResult {
if !page_aligned(addr) || !page_aligned(len) || len == 0 {
return Err(ZxError::INVALID_ARGS);
}
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
let begin = addr;
let end = addr + len;
// check partial overlapped sub-regions
if inner
.children
.iter()
.any(|vmar| vmar.partial_overlap(begin, end))
{
return Err(ZxError::INVALID_ARGS);
}
if inner
.mappings
.iter()
.any(|map| map.partial_overlap(begin, end))
{
warn!("Simplify: Not support partial unmap.");
return Err(ZxError::INVALID_ARGS);
}
inner.mappings.drain_filter(|map| map.within(begin, end));
for vmar in inner.children.drain_filter(|vmar| vmar.within(begin, end)) {
vmar.destroy_internal()?;
}
Ok(())
}
/// Change protections on a subset of the region of memory in the containing
/// address space. If the requested range overlaps with a subregion,
/// protect() will fail.
pub fn protect(&self, addr: usize, len: usize, flags: MMUFlags) -> ZxResult {
if !page_aligned(addr) || !page_aligned(len) {
return Err(ZxError::INVALID_ARGS);
}
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
let end_addr = addr + len;
// check if there are overlapping sub-regions
if inner
.children
.iter()
.any(|child| child.overlap(addr, end_addr))
{
return Err(ZxError::INVALID_ARGS);
}
let length = inner.mappings.iter().fold(0, |acc, map| {
acc + end_addr
.min(map.end_addr())
.checked_sub(addr.max(map.addr()))
.unwrap_or(0)
});
if length != len {
return Err(ZxError::NOT_FOUND);
}
// check if the protect flags are valid
if inner
.mappings
.iter()
.filter(|map| map.overlap(addr, end_addr)) // get mappings in range: [addr, end_addr]
.any(|map| !map.is_valid_mapping_flags(flags))
{
return Err(ZxError::ACCESS_DENIED);
}
inner
.mappings
.iter()
.filter(|map| map.overlap(addr, end_addr))
.for_each(|map| {
let start_index = pages(addr.max(map.addr()) - map.addr());
let end_index = pages(end_addr.min(map.end_addr()) - map.addr());
map.protect(flags, start_index, end_index);
});
Ok(())
}
/// Unmap all mappings and destroy all sub-regions of VMAR.
pub fn clear(&self) -> ZxResult {
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
for vmar in inner.children.drain(..) {
vmar.destroy_internal()?;
}
inner.mappings.clear();
Ok(())
}
/// Destroy but do not remove self from parent.
fn destroy_internal(&self) -> ZxResult {
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
for vmar in inner.children.drain(..) {
vmar.destroy_internal()?;
}
inner.mappings.clear();
*guard = None;
Ok(())
}
/// Unmap all mappings within the VMAR, and destroy all sub-regions of the region.
pub fn destroy(self: &Arc<Self>) -> ZxResult {
self.destroy_internal()?;
// remove from parent
if let Some(parent) = &self.parent {
let mut guard = parent.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
inner.children.retain(|vmar| !Arc::ptr_eq(self, vmar));
}
Ok(())
}
/// Get physical address of the underlying page table.
pub fn table_phys(&self) -> PhysAddr {
self.page_table.lock().table_phys()
}
/// Get start address of this VMAR.
pub fn addr(&self) -> usize {
self.addr
}
/// Whether this VMAR is dead.
pub fn is_dead(&self) -> bool {
self.inner.lock().is_none()
}
/// Whether this VMAR is alive.
pub fn is_alive(&self) -> bool {
!self.is_dead()
}
/// Determine final address with given input `offset` and `len`.
fn determine_offset(
&self,
inner: &VmarInner,
offset: Option<usize>,
len: usize,
align: usize,
) -> ZxResult<VirtAddr> {
if !check_aligned(len, align) {
Err(ZxError::INVALID_ARGS)
} else if let Some(offset) = offset {
if check_aligned(offset, align) && self.test_map(inner, offset, len, align) {
Ok(offset)
} else {
Err(ZxError::INVALID_ARGS)
}
} else if len > self.size {
Err(ZxError::INVALID_ARGS)
} else {
match self.find_free_area(inner, 0, len, align) {
Some(offset) => Ok(offset),
None => Err(ZxError::NO_MEMORY),
}
}
}
/// Test if can create a new mapping at `offset` with `len`.
fn test_map(&self, inner: &VmarInner, offset: usize, len: usize, align: usize) -> bool {
debug_assert!(check_aligned(offset, align));
debug_assert!(check_aligned(len, align));
let begin = self.addr + offset;
let end = begin + len;
if end > self.addr + self.size {
return false;
}
// brute force
if inner.children.iter().any(|vmar| vmar.overlap(begin, end)) {
return false;
}
if inner.mappings.iter().any(|map| map.overlap(begin, end)) {
return false;
}
true
}
/// Find a free area with `len`.
fn find_free_area(
&self,
inner: &VmarInner,
offset_hint: usize,
len: usize,
align: usize,
) -> Option<usize> {
// TODO: randomize
debug_assert!(check_aligned(offset_hint, align));
debug_assert!(check_aligned(len, align));
// brute force:
// try each area's end address as the start
core::iter::once(offset_hint)
.chain(inner.children.iter().map(|map| map.end_addr() - self.addr))
.chain(inner.mappings.iter().map(|map| map.end_addr() - self.addr))
.find(|&offset| self.test_map(inner, offset, len, align))
}
fn end_addr(&self) -> VirtAddr {
self.addr + self.size
}
fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
!(self.addr >= end || self.end_addr() <= begin)
}
fn within(&self, begin: VirtAddr, end: VirtAddr) -> bool {
begin <= self.addr && self.end_addr() <= end
}
fn partial_overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
self.overlap(begin, end) && !self.within(begin, end)
}
fn contains(&self, vaddr: VirtAddr) -> bool {
self.addr <= vaddr && vaddr < self.end_addr()
}
/// Get information of this VmAddressRegion
pub fn get_info(&self) -> VmarInfo {
// pub fn get_info(&self, va: usize) -> VmarInfo {
// let _r = self.page_table.lock().query(va);
VmarInfo {
base: self.addr(),
len: self.size,
}
}
/// Get VmarFlags of this VMAR.
pub fn get_flags(&self) -> VmarFlags {
self.flags
}
#[cfg(test)]
fn count(&self) -> usize {
let mut guard = self.inner.lock();
let inner = guard.as_mut().unwrap();
println!("m = {}, c = {}", inner.mappings.len(), inner.children.len());
inner.mappings.len() + inner.children.len()
}
#[cfg(test)]
fn used_size(&self) -> usize {
let mut guard = self.inner.lock();
let inner = guard.as_mut().unwrap();
let map_size: usize = inner.mappings.iter().map(|map| map.size()).sum();
let vmar_size: usize = inner.children.iter().map(|vmar| vmar.size).sum();
println!("size = {:#x?}", map_size + vmar_size);
map_size + vmar_size
}
}
/// Information of a VmAddressRegion.
#[repr(C)]
#[derive(Debug)]
pub struct VmarInfo {
base: usize,
len: usize,
}
/// Virtual Memory Mapping
pub struct VmMapping {
/// The permission limitation of the vmar
permissions: MMUFlags,
vmo: Arc<VmObject>,
page_table: Arc<Mutex<dyn PageTableTrait>>,
inner: Mutex<VmMappingInner>,
}
#[derive(Debug, Clone)]
struct VmMappingInner {
/// The actual flags used in the mapping of each page
flags: Vec<MMUFlags>,
addr: VirtAddr,
size: usize,
vmo_offset: usize,
}
impl core::fmt::Debug for VmMapping {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
let inner = self.inner.lock();
f.debug_struct("VmMapping")
.field("addr", &inner.addr)
.field("size", &inner.size)
.field("permissions", &self.permissions)
.field("flags", &inner.flags)
.field("vmo_id", &self.vmo.id())
.field("vmo_offset", &inner.vmo_offset)
.finish()
}
}
impl VmMapping {
fn new(
addr: VirtAddr,
size: usize,
vmo: Arc<VmObject>,
vmo_offset: usize,
permissions: MMUFlags,
flags: MMUFlags,
page_table: Arc<Mutex<dyn PageTableTrait>>,
) -> Arc<Self> {
let mapping = Arc::new(VmMapping {
inner: Mutex::new(VmMappingInner {
flags: vec![flags; pages(size)],
addr,
size,
vmo_offset,
}),
permissions,
page_table,
vmo: vmo.clone(),
});
vmo.append_mapping(Arc::downgrade(&mapping));
mapping
}
/// Map range and commit.
/// Commit pages to vmo, and map those to frames in page_table.
/// Temporarily used for development. A standard procedure for
/// vmo is: create_vmo, op_range(commit), map
fn map(self: &Arc<Self>) -> ZxResult {
self.vmo.commit_pages_with(&mut |commit| {
let inner = self.inner.lock();
let mut page_table = self.page_table.lock();
let page_num = inner.size / PAGE_SIZE;
let vmo_offset = inner.vmo_offset / PAGE_SIZE;
for i in 0..page_num {
let paddr = commit(vmo_offset + i, inner.flags[i])?;
// Map the page into the page table via PageTableTrait's hal_pt_map.
page_table
.map(inner.addr + i * PAGE_SIZE, paddr, inner.flags[i])
.expect("failed to map");
}
Ok(())
})
}
fn unmap(&self) {
let inner = self.inner.lock();
let pages = inner.size / PAGE_SIZE;
// TODO inner.vmo_offset unused?
self.page_table
.lock()
.unmap_cont(inner.addr, pages)
.expect("failed to unmap")
}
fn overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
let inner = self.inner.lock();
!(inner.addr >= end || inner.end_addr() <= begin)
}
fn within(&self, begin: VirtAddr, end: VirtAddr) -> bool {
let inner = self.inner.lock();
begin <= inner.addr && inner.end_addr() <= end
}
fn partial_overlap(&self, begin: VirtAddr, end: VirtAddr) -> bool {
self.overlap(begin, end) && !self.within(begin, end)
}
fn contains(&self, vaddr: VirtAddr) -> bool {
let inner = self.inner.lock();
inner.addr <= vaddr && vaddr < inner.end_addr()
}
fn is_valid_mapping_flags(&self, flags: MMUFlags) -> bool {
self.permissions.contains(flags & MMUFlags::RXW)
}
fn protect(&self, flags: MMUFlags, start_index: usize, end_index: usize) {
let mut inner = self.inner.lock();
let mut pg_table = self.page_table.lock();
for i in start_index..end_index {
inner.flags[i] = (inner.flags[i] & !MMUFlags::RXW) | (flags & MMUFlags::RXW);
pg_table
.protect(inner.addr + i * PAGE_SIZE, inner.flags[i])
.unwrap();
}
}
fn size(&self) -> usize {
self.inner.lock().size
}
fn addr(&self) -> VirtAddr {
self.inner.lock().addr
}
fn end_addr(&self) -> VirtAddr {
self.inner.lock().end_addr()
}
/// Get MMUFlags of this VmMapping.
pub fn get_flags(&self, vaddr: usize) -> ZxResult<MMUFlags> {
if self.contains(vaddr) {
let page_id = (vaddr - self.addr()) / PAGE_SIZE;
Ok(self.inner.lock().flags[page_id])
} else {
Err(ZxError::NO_MEMORY)
}
}
}
impl VmMappingInner {
fn end_addr(&self) -> VirtAddr {
self.addr + self.size
}
}
impl Drop for VmMapping {
fn drop(&mut self) {
self.unmap();
}
}
/// The base of kernel address space
/// On x86 Fuchsia this is 0xffff_ff80_0000_0000 instead.
pub const KERNEL_ASPACE_BASE: u64 = 0xffff_ff02_0000_0000;
/// The size of kernel address space
pub const KERNEL_ASPACE_SIZE: u64 = 0x0000_0080_0000_0000;
/// The base of user address space
pub const USER_ASPACE_BASE: u64 = 0;
// pub const USER_ASPACE_BASE: u64 = 0x0000_0000_0100_0000;
/// The size of user address space
pub const USER_ASPACE_SIZE: u64 = (1u64 << 47) - 4096 - USER_ASPACE_BASE;
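// For example, with `USER_ASPACE_BASE == 0` this evaluates to 0x7FFF_FFFF_F000:
// the canonical lower half (1 << 47) minus one page.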
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_child() {
let root_vmar = VmAddressRegion::new_root();
let child = root_vmar
.allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.expect("failed to create child VMAR");
// test invalid argument
assert_eq!(
root_vmar
.allocate_at(0x2001, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.err(),
Some(ZxError::INVALID_ARGS)
);
assert_eq!(
root_vmar
.allocate_at(0x2000, 1, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.err(),
Some(ZxError::INVALID_ARGS)
);
assert_eq!(
root_vmar
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.err(),
Some(ZxError::INVALID_ARGS)
);
assert_eq!(
child
.allocate_at(0x1000, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.err(),
Some(ZxError::INVALID_ARGS)
);
}
/// A magic value written through one mapping and read back through another.
const MAGIC: usize = 0xdead_beaf;
#[test]
#[allow(unsafe_code)]
fn map() {
let vmar = VmAddressRegion::new_root();
let vmo = VmObject::new_paged(4);
let flags = MMUFlags::READ | MMUFlags::WRITE;
// invalid argument
assert_eq!(
vmar.map_at(0, vmo.clone(), 0x4000, 0x1000, flags),
Err(ZxError::INVALID_ARGS)
);
assert_eq!(
vmar.map_at(0, vmo.clone(), 0, 0x5000, flags),
Err(ZxError::INVALID_ARGS)
);
assert_eq!(
vmar.map_at(0, vmo.clone(), 0x1000, 1, flags),
Err(ZxError::INVALID_ARGS)
);
assert_eq!(
vmar.map_at(0, vmo.clone(), 1, 0x1000, flags),
Err(ZxError::INVALID_ARGS)
);
vmar.map_at(0, vmo.clone(), 0, 0x4000, flags).unwrap();
vmar.map_at(0x12000, vmo.clone(), 0x2000, 0x1000, flags)
.unwrap();
unsafe {
((vmar.addr() + 0x2000) as *mut usize).write(MAGIC);
assert_eq!(((vmar.addr() + 0x12000) as *const usize).read(), MAGIC);
}
}
/// ```text
/// +--------+--------+--------+--------+
/// | root .... |
/// +--------+--------+--------+--------+
/// | child1 | child2 |
/// +--------+--------+--------+
/// | g-son1 | g-son2 |
/// +--------+--------+
/// ```
struct Sample {
root: Arc<VmAddressRegion>,
child1: Arc<VmAddressRegion>,
child2: Arc<VmAddressRegion>,
grandson1: Arc<VmAddressRegion>,
grandson2: Arc<VmAddressRegion>,
}
impl Sample {
fn new() -> Self {
let root = VmAddressRegion::new_root();
let child1 = root
.allocate_at(0, 0x2000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.unwrap();
let child2 = root
.allocate_at(0x2000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.unwrap();
let grandson1 = child1
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.unwrap();
let grandson2 = child1
.allocate_at(0x1000, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.unwrap();
Sample {
root,
child1,
child2,
grandson1,
grandson2,
}
}
}
#[test]
fn unmap_vmar() {
let s = Sample::new();
let base = s.root.addr();
s.child1.unmap(base, 0x1000).unwrap();
assert!(s.grandson1.is_dead());
assert!(s.grandson2.is_alive());
// partial overlap sub-region should fail.
let s = Sample::new();
let base = s.root.addr();
assert_eq!(
s.root.unmap(base + 0x1000, 0x2000),
Err(ZxError::INVALID_ARGS)
);
// unmapping nothing should succeed.
let s = Sample::new();
let base = s.root.addr();
s.child1.unmap(base + 0x8000, 0x1000).unwrap();
}
#[test]
fn destroy() {
let s = Sample::new();
s.child1.destroy().unwrap();
assert!(s.child1.is_dead());
assert!(s.grandson1.is_dead());
assert!(s.grandson2.is_dead());
assert!(s.child2.is_alive());
// address space should be released
assert!(s
.root
.allocate_at(0, 0x1000, VmarFlags::CAN_MAP_RXW, PAGE_SIZE)
.is_ok());
}
#[test]
fn unmap_mapping() {
// +--------+--------+--------+--------+--------+
// 1 [--------------------------|xxxxxxxx|--------]
// 2 [xxxxxxxx|-----------------]
// 3 [--------|xxxxxxxx]
// 4 [xxxxxxxx]
let vmar = VmAddressRegion::new_root();
let base = vmar.addr();
let vmo = VmObject::new_paged(5);
let flags = MMUFlags::READ | MMUFlags::WRITE;
vmar.map_at(0, vmo, 0, 0x5000, flags).unwrap();
assert_eq!(vmar.count(), 1);
assert_eq!(vmar.used_size(), 0x5000);
// 0. unmap none.
vmar.unmap(base + 0x5000, 0x1000).unwrap();
assert_eq!(vmar.count(), 1);
assert_eq!(vmar.used_size(), 0x5000);
// // 1. unmap middle.
// vmar.unmap(base + 0x3000, 0x1000).unwrap();
// assert_eq!(vmar.count(), 2);
// assert_eq!(vmar.used_size(), 0x4000);
// // 2. unmap prefix.
// vmar.unmap(base, 0x1000).unwrap();
// assert_eq!(vmar.count(), 2);
// assert_eq!(vmar.used_size(), 0x3000);
// // 3. unmap postfix.
// vmar.unmap(base + 0x2000, 0x1000).unwrap();
// assert_eq!(vmar.count(), 2);
// assert_eq!(vmar.used_size(), 0x2000);
// 4. unmap all.
vmar.unmap(base, 0x5000).unwrap();
assert_eq!(vmar.count(), 0);
assert_eq!(vmar.used_size(), 0x0);
}
}

@ -0,0 +1,469 @@
use {
self::{paged::*, physical::*, slice::*},
super::*,
crate::object::*,
alloc::{
sync::{Arc, Weak},
vec::Vec,
},
bitflags::bitflags,
core::ops::Deref,
kernel_hal::{CachePolicy, MMUFlags},
spin::Mutex,
};
mod paged;
mod physical;
mod slice;
/// Virtual Memory Object Trait
#[allow(clippy::len_without_is_empty)]
pub trait VMObjectTrait: Sync + Send {
/// Read memory to `buf` from VMO at `offset`.
fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult;
/// Write memory from `buf` to VMO at `offset`.
fn write(&self, offset: usize, buf: &[u8]) -> ZxResult;
/// Resets the range of bytes in the VMO from `offset` to `offset+len` to 0.
fn zero(&self, offset: usize, len: usize) -> ZxResult;
/// Get the length of VMO.
fn len(&self) -> usize;
/// Set the length of VMO.
fn set_len(&self, len: usize) -> ZxResult;
/// Commit a page.
fn commit_page(&self, page_idx: usize, flags: MMUFlags) -> ZxResult<PhysAddr>;
/// Commit pages with an external function `f`.
/// The VMO is internally locked before it calls `f`,
/// allowing `VmMapping` to avoid deadlock.
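///
/// A hedged sketch of the callback shape (`page_idx` is a placeholder; see
/// `VmMapping::map` for the real caller):
///
/// ```ignore
/// vmo.commit_pages_with(&mut |commit| {
///     let paddr = commit(page_idx, MMUFlags::READ)?; // commit one page, get its PhysAddr
///     // ... map `paddr` into a page table ...
///     Ok(())
/// })?;
/// ```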
fn commit_pages_with(
&self,
f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
) -> ZxResult;
/// Commit allocating physical memory.
fn commit(&self, offset: usize, len: usize) -> ZxResult;
/// Decommit allocated physical memory.
fn decommit(&self, offset: usize, len: usize) -> ZxResult;
/// Create a child VMO.
fn create_child(&self, offset: usize, len: usize) -> ZxResult<Arc<dyn VMObjectTrait>>;
/// Append a mapping to the VMO's mapping list.
fn append_mapping(&self, _mapping: Weak<VmMapping>) {}
/// Remove a mapping from the VMO's mapping list.
fn remove_mapping(&self, _mapping: Weak<VmMapping>) {}
/// Complete the VmoInfo.
fn complete_info(&self, info: &mut VmoInfo);
/// Get the cache policy.
fn cache_policy(&self) -> CachePolicy;
/// Set the cache policy.
fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult;
/// Count committed pages of the VMO.
fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize;
/// Pin the given range of the VMO.
fn pin(&self, _offset: usize, _len: usize) -> ZxResult {
Err(ZxError::NOT_SUPPORTED)
}
/// Unpin the given range of the VMO.
fn unpin(&self, _offset: usize, _len: usize) -> ZxResult {
Err(ZxError::NOT_SUPPORTED)
}
/// Returns true if the object is backed by a contiguous range of physical memory.
fn is_contiguous(&self) -> bool {
false
}
/// Returns true if the object is backed by RAM.
fn is_paged(&self) -> bool {
false
}
}
/// Virtual memory containers
///
/// ## SYNOPSIS
///
/// A Virtual Memory Object (VMO) represents a contiguous region of virtual memory
/// that may be mapped into multiple address spaces.
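///
/// ## EXAMPLE
///
/// A minimal read/write round-trip (mirroring the `read_write` test below):
///
/// ```ignore
/// let vmo = VmObject::new_paged(1);
/// vmo.write(0, &[0, 1, 2, 3]).unwrap();
/// let mut buf = [0u8; 4];
/// vmo.read(0, &mut buf).unwrap();
/// assert_eq!(buf, [0, 1, 2, 3]);
/// ```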
pub struct VmObject {
base: KObjectBase,
resizable: bool,
trait_: Arc<dyn VMObjectTrait>,
inner: Mutex<VmObjectInner>,
}
impl_kobject!(VmObject);
#[derive(Default)]
struct VmObjectInner {
parent: Weak<VmObject>,
children: Vec<Weak<VmObject>>,
mapping_count: usize,
content_size: usize,
}
impl VmObject {
/// Create a new VMO backing on physical memory allocated in pages.
pub fn new_paged(pages: usize) -> Arc<Self> {
Self::new_paged_with_resizable(false, pages)
}
/// Create a new VMO, which can be resizable, backing on physical memory allocated in pages.
pub fn new_paged_with_resizable(resizable: bool, pages: usize) -> Arc<Self> {
let base = KObjectBase::new();
Arc::new(VmObject {
resizable,
trait_: VMObjectPaged::new(pages),
inner: Mutex::new(VmObjectInner::default()),
base,
})
}
/// Create a new VMO representing a piece of contiguous physical memory.
pub fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
Arc::new(VmObject {
base: KObjectBase::new(),
resizable: false,
trait_: VMObjectPhysical::new(paddr, pages),
inner: Mutex::new(VmObjectInner::default()),
})
}
/// Create a VM object referring to a specific contiguous range of physical frame.
pub fn new_contiguous(pages: usize, align_log2: usize) -> ZxResult<Arc<Self>> {
let vmo = Arc::new(VmObject {
base: KObjectBase::new(),
resizable: false,
trait_: VMObjectPaged::new_contiguous(pages, align_log2)?,
inner: Mutex::new(VmObjectInner::default()),
});
Ok(vmo)
}
/// Create a child VMO.
pub fn create_child(
self: &Arc<Self>,
resizable: bool,
offset: usize,
len: usize,
) -> ZxResult<Arc<Self>> {
let base = KObjectBase::with_name(&self.base.name());
let trait_ = self.trait_.create_child(offset, len)?;
let child = Arc::new(VmObject {
base,
resizable,
trait_,
inner: Mutex::new(VmObjectInner {
parent: Arc::downgrade(self),
..VmObjectInner::default()
}),
});
self.add_child(&child);
Ok(child)
}
/// Create a child slice as a VMO.
pub fn create_slice(self: &Arc<Self>, offset: usize, p_size: usize) -> ZxResult<Arc<Self>> {
let size = roundup_pages(p_size);
// why 32 * PAGE_SIZE? Taken from the Zircon source code.
if size < p_size || size > usize::MAX & !(32 * PAGE_SIZE) {
return Err(ZxError::OUT_OF_RANGE);
}
// child slice must be wholly contained
let parent_size = self.trait_.len();
if !page_aligned(offset) {
return Err(ZxError::INVALID_ARGS);
}
if offset > parent_size || size > parent_size - offset {
return Err(ZxError::INVALID_ARGS);
}
if self.resizable {
return Err(ZxError::NOT_SUPPORTED);
}
if self.trait_.cache_policy() != CachePolicy::Cached && !self.trait_.is_contiguous() {
return Err(ZxError::BAD_STATE);
}
let child = Arc::new(VmObject {
base: KObjectBase::with_name(&self.base.name()),
resizable: false,
trait_: VMObjectSlice::new(self.trait_.clone(), offset, size),
inner: Mutex::new(VmObjectInner {
parent: Arc::downgrade(self),
..VmObjectInner::default()
}),
});
self.add_child(&child);
Ok(child)
}
/// Add a child to the list, dropping dead weak references.
/// If the number of children goes from 0 to 1, the ZERO_CHILDREN signal should be cleared.
fn add_child(&self, child: &Arc<VmObject>) {
let mut inner = self.inner.lock();
inner.children.retain(|x| x.strong_count() != 0);
inner.children.push(Arc::downgrade(child));
// if inner.children.len() == 1 {
// self.base.signal_clear(Signal::VMO_ZERO_CHILDREN);
// }
}
/// Set the length of this VMO if resizable.
pub fn set_len(&self, len: usize) -> ZxResult {
let size = roundup_pages(len);
if size < len {
return Err(ZxError::OUT_OF_RANGE);
}
if !self.resizable {
return Err(ZxError::UNAVAILABLE);
}
self.trait_.set_len(size)
}
/// Set the size of the content stored in the VMO in bytes, resizing the VMO if needed.
pub fn set_content_size_and_resize(
&self,
size: usize,
zero_until_offset: usize,
) -> ZxResult<usize> {
let mut inner = self.inner.lock();
let content_size = inner.content_size;
let len = self.trait_.len();
if size < content_size {
return Ok(content_size);
}
let required_len = roundup_pages(size);
let new_content_size = if required_len > len && self.set_len(required_len).is_err() {
len
} else {
size
};
let zero_until_offset = zero_until_offset.min(new_content_size);
if zero_until_offset > content_size {
self.trait_
.zero(content_size, zero_until_offset - content_size)?;
}
inner.content_size = new_content_size;
Ok(new_content_size)
}
/// Get the size of the content stored in the VMO in bytes.
pub fn content_size(&self) -> usize {
let inner = self.inner.lock();
inner.content_size
}
/// Set the size of the content stored in the VMO in bytes.
pub fn set_content_size(&self, size: usize) -> ZxResult {
let mut inner = self.inner.lock();
inner.content_size = size;
Ok(())
}
/// Get information of this VMO.
pub fn get_info(&self) -> VmoInfo {
let inner = self.inner.lock();
let mut ret = VmoInfo {
koid: self.base.id,
name: {
let mut arr = [0u8; 32];
let name = self.base.name();
let length = name.len().min(32);
arr[..length].copy_from_slice(&name.as_bytes()[..length]);
arr
},
size: self.trait_.len() as u64,
parent_koid: inner.parent.upgrade().map(|p| p.id()).unwrap_or(0),
num_children: inner.children.len() as u64,
flags: if self.resizable {
VmoInfoFlags::RESIZABLE
} else {
VmoInfoFlags::empty()
},
cache_policy: self.trait_.cache_policy() as u32,
share_count: inner.mapping_count as u64,
..Default::default()
};
self.trait_.complete_info(&mut ret);
ret
}
/// Set the cache policy.
pub fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
let inner = self.inner.lock();
if !inner.children.is_empty() {
return Err(ZxError::BAD_STATE);
}
if inner.mapping_count != 0 {
return Err(ZxError::BAD_STATE);
}
self.trait_.set_cache_policy(policy)
}
/// Append a mapping to the VMO's mapping list.
pub fn append_mapping(&self, mapping: Weak<VmMapping>) {
self.inner.lock().mapping_count += 1;
self.trait_.append_mapping(mapping);
}
/// Remove a mapping from the VMO's mapping list.
pub fn remove_mapping(&self, mapping: Weak<VmMapping>) {
self.inner.lock().mapping_count -= 1;
self.trait_.remove_mapping(mapping);
}
/// Returns an estimate of the number of unique VmAspaces that this object
/// is mapped into.
pub fn share_count(&self) -> usize {
let inner = self.inner.lock();
inner.mapping_count
}
/// Returns true if the object size can be changed.
pub fn is_resizable(&self) -> bool {
self.resizable
}
/// Returns true if the object is backed by a contiguous range of physical memory.
pub fn is_contiguous(&self) -> bool {
self.trait_.is_contiguous()
}
}
impl Deref for VmObject {
type Target = Arc<dyn VMObjectTrait>;
fn deref(&self) -> &Self::Target {
&self.trait_
}
}
impl Drop for VmObject {
fn drop(&mut self) {
let mut inner = self.inner.lock();
let parent = match inner.parent.upgrade() {
Some(parent) => parent,
None => return,
};
for child in inner.children.iter() {
if let Some(child) = child.upgrade() {
child.inner.lock().parent = Arc::downgrade(&parent);
}
}
let mut parent_inner = parent.inner.lock();
let children = &mut parent_inner.children;
children.append(&mut inner.children);
children.retain(|c| c.strong_count() != 0);
for child in children.iter() {
let child = child.upgrade().unwrap();
let mut inner = child.inner.lock();
inner.children.retain(|c| c.strong_count() != 0);
}
}
}
/// Describes a VMO.
#[repr(C)]
#[derive(Default)]
pub struct VmoInfo {
/// The koid of this VMO.
koid: KoID,
/// The name of this VMO.
name: [u8; 32],
/// The size of this VMO; i.e., the amount of virtual address space it
/// would consume if mapped.
size: u64,
/// If this VMO is a clone, the koid of its parent. Otherwise, zero.
parent_koid: KoID,
/// The number of clones of this VMO, if any.
num_children: u64,
/// The number of times this VMO is currently mapped into VMARs.
num_mappings: u64,
/// The number of unique address spaces we're mapped into.
share_count: u64,
/// Flags.
pub flags: VmoInfoFlags,
/// Padding.
padding1: [u8; 4],
/// If the type is `PAGED`, the amount of
/// memory currently allocated to this VMO; i.e., the amount of physical
/// memory it consumes. Undefined otherwise.
committed_bytes: u64,
/// If `flags & ZX_INFO_VMO_VIA_HANDLE`, the handle rights.
/// Undefined otherwise.
pub rights: Rights,
/// VMO mapping cache policy.
cache_policy: u32,
}
bitflags! {
#[derive(Default)]
/// Values used by ZX_INFO_PROCESS_VMOS.
pub struct VmoInfoFlags: u32 {
/// The VMO points to a physical address range, and does not consume memory.
/// Typically used to access memory-mapped hardware.
/// Mutually exclusive with TYPE_PAGED.
const TYPE_PHYSICAL = 0;
#[allow(clippy::identity_op)]
/// The VMO is backed by RAM, consuming memory.
/// Mutually exclusive with TYPE_PHYSICAL.
const TYPE_PAGED = 1 << 0;
/// The VMO is resizable.
const RESIZABLE = 1 << 1;
/// The VMO is a child, and is a copy-on-write clone.
const IS_COW_CLONE = 1 << 2;
/// When reading a list of VMOs pointed to by a process, indicates that the
/// process has a handle to the VMO, which isn't necessarily mapped.
const VIA_HANDLE = 1 << 3;
/// When reading a list of VMOs pointed to by a process, indicates that the
/// process maps the VMO into a VMAR, but doesn't necessarily have a handle to
/// the VMO.
const VIA_MAPPING = 1 << 4;
/// The VMO is a pager owned VMO created by zx_pager_create_vmo or is
/// a clone of a VMO with this flag set. Will only be set on VMOs with
/// the ZX_INFO_VMO_TYPE_PAGED flag set.
const PAGER_BACKED = 1 << 5;
/// The VMO is contiguous.
const CONTIGUOUS = 1 << 6;
}
}
/// Different operations that `range_change` can perform against any VmMappings that are found.
#[allow(dead_code)]
#[derive(PartialEq, Eq, Clone, Copy)]
pub(super) enum RangeChangeOp {
Unmap,
RemoveWrite,
}
#[cfg(test)]
mod tests {
use super::*;
pub fn read_write(vmo: &VmObject) {
let mut buf = [0u8; 4];
vmo.write(0, &[0, 1, 2, 3]).unwrap();
vmo.read(0, &mut buf).unwrap();
assert_eq!(&buf, &[0, 1, 2, 3]);
}
}

@ -0,0 +1,337 @@
use {
super::*,
crate::util::BlockIter,
alloc::sync::Arc,
alloc::vec::Vec,
core::ops::Range,
kernel_hal::{MMUFlags, PhysFrame, PAGE_SIZE},
spin::Mutex,
};
/// The main VM object type, holding a list of pages.
pub struct VMObjectPaged {
inner: Mutex<VMObjectPagedInner>,
}
/// The mutable part of `VMObjectPaged`.
#[derive(Default)]
struct VMObjectPagedInner {
/// Physical frames of this VMO.
frames: Vec<PhysFrame>,
/// Cache Policy
cache_policy: CachePolicy,
/// Is contiguous
contiguous: bool,
/// Sum of pin_count
pin_count: usize,
/// All mappings to this VMO.
mappings: Vec<Weak<VmMapping>>,
}
impl VMObjectPaged {
    /// Create a new VMO backed by physical memory allocated in pages.
pub fn new(pages: usize) -> Arc<Self> {
let mut frames = Vec::new();
frames.resize_with(pages, || PhysFrame::alloc_zeroed().unwrap());
Arc::new(VMObjectPaged {
inner: Mutex::new(VMObjectPagedInner {
frames,
..Default::default()
}),
})
}
    /// Create a new VMO backed by physically contiguous pages, aligned to `1 << align_log2` bytes.
pub fn new_contiguous(pages: usize, align_log2: usize) -> ZxResult<Arc<Self>> {
let frames = PhysFrame::alloc_contiguous_zeroed(pages, align_log2 - PAGE_SIZE_LOG2);
if frames.is_empty() {
return Err(ZxError::NO_MEMORY);
}
Ok(Arc::new(VMObjectPaged {
inner: Mutex::new(VMObjectPagedInner {
frames,
contiguous: true,
..Default::default()
}),
}))
}
}
impl VMObjectTrait for VMObjectPaged {
fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
let mut inner = self.inner.lock();
if inner.cache_policy != CachePolicy::Cached {
return Err(ZxError::BAD_STATE);
}
inner.for_each_page(offset, buf.len(), |paddr, buf_range| {
kernel_hal::pmem_read(paddr, &mut buf[buf_range]);
});
Ok(())
}
fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
let mut inner = self.inner.lock();
if inner.cache_policy != CachePolicy::Cached {
return Err(ZxError::BAD_STATE);
}
inner.for_each_page(offset, buf.len(), |paddr, buf_range| {
kernel_hal::pmem_write(paddr, &buf[buf_range]);
});
Ok(())
}
fn zero(&self, offset: usize, len: usize) -> ZxResult {
let mut inner = self.inner.lock();
if inner.cache_policy != CachePolicy::Cached {
return Err(ZxError::BAD_STATE);
}
inner.for_each_page(offset, len, |paddr, buf_range| {
kernel_hal::pmem_zero(paddr, buf_range.len());
});
Ok(())
}
fn len(&self) -> usize {
let inner = self.inner.lock();
inner.frames.len() * PAGE_SIZE
}
    fn set_len(&self, len: usize) -> ZxResult {
        assert!(page_aligned(len));
        let mut inner = self.inner.lock();
        // NOTE: allocation failure inside `resize_with` cannot be propagated
        // as `ZxError::NO_MEMORY`, so it panics here instead. Use zeroed
        // frames so that newly grown ranges read as zero, matching `new`.
        inner.frames.resize_with(len / PAGE_SIZE, || {
            PhysFrame::alloc_zeroed().expect("out of physical memory")
        });
        Ok(())
    }
fn commit_page(&self, page_idx: usize, _flags: MMUFlags) -> ZxResult<PhysAddr> {
let inner = self.inner.lock();
Ok(inner.frames[page_idx].addr())
}
fn commit_pages_with(
&self,
f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
) -> ZxResult {
let inner = self.inner.lock();
f(&mut |page_idx, _| Ok(inner.frames[page_idx].addr()))
}
fn commit(&self, _offset: usize, _len: usize) -> ZxResult {
Ok(())
}
fn decommit(&self, _offset: usize, _len: usize) -> ZxResult {
Ok(())
}
fn create_child(&self, offset: usize, len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
assert!(page_aligned(offset));
assert!(page_aligned(len));
let mut inner = self.inner.lock();
let child = inner.create_child(offset, len)?;
Ok(child)
}
fn append_mapping(&self, mapping: Weak<VmMapping>) {
let mut inner = self.inner.lock();
inner.mappings.push(mapping);
}
fn remove_mapping(&self, mapping: Weak<VmMapping>) {
let mut inner = self.inner.lock();
inner
.mappings
.drain_filter(|x| x.strong_count() == 0 || Weak::ptr_eq(x, &mapping));
}
fn complete_info(&self, info: &mut VmoInfo) {
let inner = self.inner.lock();
info.flags |= VmoInfoFlags::TYPE_PAGED;
inner.complete_info(info);
}
fn cache_policy(&self) -> CachePolicy {
let inner = self.inner.lock();
inner.cache_policy
}
fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
// conditions for allowing the cache policy to be set:
// 1) vmo either has no pages committed currently or is transitioning from being cached
// 2) vmo has no pinned pages
// 3) vmo has no mappings
// 4) vmo has no children (TODO)
// 5) vmo is not a child
let mut inner = self.inner.lock();
if !inner.frames.is_empty() && inner.cache_policy != CachePolicy::Cached {
return Err(ZxError::BAD_STATE);
}
if inner.pin_count != 0 {
return Err(ZxError::BAD_STATE);
}
if inner.cache_policy == CachePolicy::Cached && policy != CachePolicy::Cached {
for frame in inner.frames.iter() {
kernel_hal::frame_flush(frame.addr());
}
}
inner.cache_policy = policy;
Ok(())
}
    fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
        // All pages are committed eagerly in this simplified implementation.
        end_idx - start_idx
    }
fn pin(&self, offset: usize, len: usize) -> ZxResult {
let mut inner = self.inner.lock();
if offset + len > inner.frames.len() * PAGE_SIZE {
return Err(ZxError::OUT_OF_RANGE);
}
if len == 0 {
return Ok(());
}
inner.pin_count += pages(len);
Ok(())
}
fn unpin(&self, offset: usize, len: usize) -> ZxResult {
let mut inner = self.inner.lock();
if offset + len > inner.frames.len() * PAGE_SIZE {
return Err(ZxError::OUT_OF_RANGE);
}
if len == 0 {
return Ok(());
}
inner.pin_count -= pages(len);
Ok(())
}
fn is_contiguous(&self) -> bool {
let inner = self.inner.lock();
inner.contiguous
}
fn is_paged(&self) -> bool {
true
}
}
impl VMObjectPagedInner {
    /// Helper function to split a range into sub-ranges within pages.
///
/// ```text
/// VMO range:
/// |----|----|----|----|----|
///
/// buf:
/// [====len====]
/// |--offset--|
///
/// sub-ranges:
/// [===]
/// [====]
/// [==]
/// ```
///
/// `f` is a function to process in-page ranges.
/// It takes 2 arguments:
/// * `paddr`: the start physical address of the in-page range.
/// * `buf_range`: the range in view of the input buffer.
fn for_each_page(
&mut self,
offset: usize,
buf_len: usize,
mut f: impl FnMut(PhysAddr, Range<usize>),
) {
        let iter = BlockIter {
            begin: offset,
            end: offset + buf_len,
            block_size_log2: 12, // pages: PAGE_SIZE == 1 << 12
        };
for block in iter {
let paddr = self.frames[block.block].addr();
let buf_range = block.origin_begin() - offset..block.origin_end() - offset;
f(paddr + block.begin, buf_range);
}
}
/// Create a snapshot child VMO.
fn create_child(&mut self, offset: usize, len: usize) -> ZxResult<Arc<VMObjectPaged>> {
        // Cloning a contiguous VMO is no longer permitted:
// https://fuchsia.googlesource.com/fuchsia/+/e6b4c6751bbdc9ed2795e81b8211ea294f139a45
if self.contiguous {
return Err(ZxError::INVALID_ARGS);
}
if self.cache_policy != CachePolicy::Cached || self.pin_count != 0 {
return Err(ZxError::BAD_STATE);
}
let mut frames = Vec::with_capacity(pages(len));
for _ in 0..pages(len) {
frames.push(PhysFrame::alloc().ok_or(ZxError::NO_MEMORY)?);
}
for (i, frame) in frames.iter().enumerate() {
if let Some(src_frame) = self.frames.get(pages(offset) + i) {
kernel_hal::frame_copy(src_frame.addr(), frame.addr())
} else {
kernel_hal::pmem_zero(frame.addr(), PAGE_SIZE);
}
}
// create child VMO
let child = Arc::new(VMObjectPaged {
inner: Mutex::new(VMObjectPagedInner {
frames,
..Default::default()
}),
});
Ok(child)
}
fn complete_info(&self, info: &mut VmoInfo) {
if self.contiguous {
info.flags |= VmoInfoFlags::CONTIGUOUS;
}
// info.num_children = if self.type_.is_hidden() { 2 } else { 0 };
info.committed_bytes = (self.frames.len() * PAGE_SIZE) as u64;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn read_write() {
let vmo = VmObject::new_paged(2);
super::super::tests::read_write(&*vmo);
}
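    // A sketch of the state checks in `set_cache_policy`, added for
    // illustration (it assumes `CachePolicy` derives `Debug`/`PartialEq`,
    // as the physical VMO tests already rely on): a pinned VMO may not
    // change its cache policy (condition 2 in the comments above).
    #[test]
    fn cache_policy_pinned() {
        let vmo = VMObjectPaged::new(1);
        assert_eq!(vmo.cache_policy(), CachePolicy::Cached);
        vmo.pin(0, PAGE_SIZE).unwrap();
        // `pin_count != 0` causes BAD_STATE before any flush happens.
        assert!(vmo.set_cache_policy(CachePolicy::Uncached).is_err());
        vmo.unpin(0, PAGE_SIZE).unwrap();
    }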
#[test]
fn create_child() {
let vmo = VmObject::new_paged(1);
let child_vmo = vmo.create_child(false, 0, PAGE_SIZE).unwrap();
// write to parent and make sure clone doesn't see it
vmo.test_write(0, 1);
assert_eq!(vmo.test_read(0), 1);
assert_eq!(child_vmo.test_read(0), 0);
// write to clone and make sure parent doesn't see it
child_vmo.test_write(0, 2);
assert_eq!(vmo.test_read(0), 1);
assert_eq!(child_vmo.test_read(0), 2);
}
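    // An added sketch exercising the sub-range splitting in
    // `VMObjectPagedInner::for_each_page`: a buffer written across a
    // page boundary is split into two in-page ranges (the tail of page 0
    // and the head of page 1), and reads back intact.
    #[test]
    fn cross_page_access() {
        let vmo = VmObject::new_paged(2);
        let data = [1u8, 2, 3, 4];
        vmo.write(PAGE_SIZE - 2, &data).unwrap();
        let mut buf = [0u8; 4];
        vmo.read(PAGE_SIZE - 2, &mut buf).unwrap();
        assert_eq!(buf, data);
    }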
impl VmObject {
pub fn test_write(&self, page: usize, value: u8) {
self.write(page * PAGE_SIZE, &[value]).unwrap();
}
pub fn test_read(&self, page: usize) -> u8 {
let mut buf = [0; 1];
self.read(page * PAGE_SIZE, &mut buf).unwrap();
buf[0]
}
}
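    // An added sketch of the pin/unpin bounds checks: pinning past the
    // end of the VMO fails, while a full-range pin/unpin round-trips.
    #[test]
    fn pin_bounds() {
        let vmo = VMObjectPaged::new(1);
        assert!(vmo.pin(0, 2 * PAGE_SIZE).is_err());
        vmo.pin(0, PAGE_SIZE).unwrap();
        vmo.unpin(0, PAGE_SIZE).unwrap();
    }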
}

@ -0,0 +1,129 @@
use {super::*, alloc::sync::Arc, kernel_hal::MMUFlags, spin::Mutex};
/// VMO representing a physical range of memory.
pub struct VMObjectPhysical {
paddr: PhysAddr,
pages: usize,
    /// Lock this when accessing physical memory.
data_lock: Mutex<()>,
inner: Mutex<VMObjectPhysicalInner>,
}
struct VMObjectPhysicalInner {
cache_policy: CachePolicy,
}
impl VMObjectPhysicalInner {
pub fn new() -> VMObjectPhysicalInner {
VMObjectPhysicalInner {
cache_policy: CachePolicy::Uncached,
}
}
}
impl VMObjectPhysical {
    /// Create a new VMO representing a piece of contiguous physical memory.
    ///
    /// The caller must ensure that no one else owns this memory range.
pub fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
assert!(page_aligned(paddr));
Arc::new(VMObjectPhysical {
paddr,
pages,
data_lock: Mutex::default(),
inner: Mutex::new(VMObjectPhysicalInner::new()),
})
}
}
impl VMObjectTrait for VMObjectPhysical {
fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
let _ = self.data_lock.lock();
assert!(offset + buf.len() <= self.len());
kernel_hal::pmem_read(self.paddr + offset, buf);
Ok(())
}
fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
let _ = self.data_lock.lock();
assert!(offset + buf.len() <= self.len());
kernel_hal::pmem_write(self.paddr + offset, buf);
Ok(())
}
fn zero(&self, offset: usize, len: usize) -> ZxResult {
let _ = self.data_lock.lock();
assert!(offset + len <= self.len());
kernel_hal::pmem_zero(self.paddr + offset, len);
Ok(())
}
fn len(&self) -> usize {
self.pages * PAGE_SIZE
}
fn set_len(&self, _len: usize) -> ZxResult {
unimplemented!()
}
fn commit_page(&self, page_idx: usize, _flags: MMUFlags) -> ZxResult<PhysAddr> {
Ok(self.paddr + page_idx * PAGE_SIZE)
}
fn commit_pages_with(
&self,
f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
) -> ZxResult {
f(&mut |page_idx, _flags| Ok(self.paddr + page_idx * PAGE_SIZE))
}
fn commit(&self, _offset: usize, _len: usize) -> ZxResult {
// do nothing
Ok(())
}
fn decommit(&self, _offset: usize, _len: usize) -> ZxResult {
// do nothing
Ok(())
}
fn create_child(&self, _offset: usize, _len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
Err(ZxError::NOT_SUPPORTED)
}
fn complete_info(&self, _info: &mut VmoInfo) {
warn!("VmoInfo for physical is unimplemented");
}
fn cache_policy(&self) -> CachePolicy {
let inner = self.inner.lock();
inner.cache_policy
}
fn set_cache_policy(&self, policy: CachePolicy) -> ZxResult {
let mut inner = self.inner.lock();
inner.cache_policy = policy;
Ok(())
}
    fn committed_pages_in_range(&self, _start_idx: usize, _end_idx: usize) -> usize {
        // Physical VMOs do not consume pages (see `VmoInfoFlags::TYPE_PHYSICAL`).
        0
    }
fn is_contiguous(&self) -> bool {
true
}
}
#[cfg(test)]
mod tests {
#![allow(unsafe_code)]
use super::*;
use kernel_hal::CachePolicy;
#[test]
fn read_write() {
let vmo = VmObject::new_physical(0x1000, 2);
assert_eq!(vmo.cache_policy(), CachePolicy::Uncached);
super::super::tests::read_write(&vmo);
}
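    // An added sketch (assuming `MMUFlags::READ` from kernel-hal): a
    // physical VMO maps page indices directly to physical addresses,
    // offset from its base `paddr`, without touching memory.
    #[test]
    fn commit_page_addr() {
        let vmo = VMObjectPhysical::new(0x1000, 2);
        assert_eq!(vmo.commit_page(0, MMUFlags::READ).unwrap(), 0x1000);
        assert_eq!(
            vmo.commit_page(1, MMUFlags::READ).unwrap(),
            0x1000 + PAGE_SIZE
        );
    }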
}

@ -0,0 +1,112 @@
use {super::*, kernel_hal::MMUFlags};
pub struct VMObjectSlice {
/// Parent node.
parent: Arc<dyn VMObjectTrait>,
/// The offset from parent.
offset: usize,
/// The size in bytes.
size: usize,
}
impl VMObjectSlice {
pub fn new(parent: Arc<dyn VMObjectTrait>, offset: usize, size: usize) -> Arc<Self> {
Arc::new(VMObjectSlice {
parent,
offset,
size,
})
}
    fn check_range(&self, offset: usize, len: usize) -> ZxResult {
        // Use `>` here: an access ending exactly at the slice boundary
        // (offset + len == size) is valid.
        if offset + len > self.size {
            return Err(ZxError::OUT_OF_RANGE);
        }
        Ok(())
    }
}
}
impl VMObjectTrait for VMObjectSlice {
fn read(&self, offset: usize, buf: &mut [u8]) -> ZxResult {
self.check_range(offset, buf.len())?;
self.parent.read(offset + self.offset, buf)
}
fn write(&self, offset: usize, buf: &[u8]) -> ZxResult {
self.check_range(offset, buf.len())?;
self.parent.write(offset + self.offset, buf)
}
fn zero(&self, offset: usize, len: usize) -> ZxResult {
self.check_range(offset, len)?;
self.parent.zero(offset + self.offset, len)
}
fn len(&self) -> usize {
self.size
}
fn set_len(&self, _len: usize) -> ZxResult {
unimplemented!()
}
    fn commit_page(&self, page_idx: usize, flags: MMUFlags) -> ZxResult<PhysAddr> {
self.parent
.commit_page(page_idx + self.offset / PAGE_SIZE, flags)
}
fn commit_pages_with(
&self,
f: &mut dyn FnMut(&mut dyn FnMut(usize, MMUFlags) -> ZxResult<PhysAddr>) -> ZxResult,
) -> ZxResult {
self.parent.commit_pages_with(f)
}
fn commit(&self, offset: usize, len: usize) -> ZxResult {
self.parent.commit(offset + self.offset, len)
}
fn decommit(&self, offset: usize, len: usize) -> ZxResult {
self.parent.decommit(offset + self.offset, len)
}
fn create_child(&self, _offset: usize, _len: usize) -> ZxResult<Arc<dyn VMObjectTrait>> {
Err(ZxError::NOT_SUPPORTED)
}
fn complete_info(&self, info: &mut VmoInfo) {
self.parent.complete_info(info);
}
fn cache_policy(&self) -> CachePolicy {
self.parent.cache_policy()
}
    fn set_cache_policy(&self, _policy: CachePolicy) -> ZxResult {
        // The cache policy of a slice is controlled by its parent;
        // accept the call without effect.
        Ok(())
    }
fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
let po = pages(self.offset);
self.parent
.committed_pages_in_range(start_idx + po, end_idx + po)
}
fn pin(&self, offset: usize, len: usize) -> ZxResult {
self.check_range(offset, len)?;
self.parent.pin(offset + self.offset, len)
}
fn unpin(&self, offset: usize, len: usize) -> ZxResult {
self.check_range(offset, len)?;
self.parent.unpin(offset + self.offset, len)
}
fn is_contiguous(&self) -> bool {
self.parent.is_contiguous()
}
fn is_paged(&self) -> bool {
self.parent.is_paged()
}
}
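#[cfg(test)]
mod tests {
    use super::*;

    // An illustrative test added as a sketch (it assumes `VMObjectPaged`
    // is re-exported by the parent `vmo` module, as the other submodules'
    // tests suggest): a slice forwards accesses to its parent with
    // `self.offset` applied, and bounds-checks against its own size.
    #[test]
    fn slice_forwards_to_parent() {
        let parent = VMObjectPaged::new(2);
        let slice = VMObjectSlice::new(parent.clone(), PAGE_SIZE, PAGE_SIZE);
        // Data written at slice offset 0 lands at parent offset PAGE_SIZE.
        slice.write(0, &[1, 2, 3, 4]).unwrap();
        let mut buf = [0u8; 4];
        parent.read(PAGE_SIZE, &mut buf).unwrap();
        assert_eq!(buf, [1, 2, 3, 4]);
        // Accesses past the slice length are rejected.
        assert!(slice.read(PAGE_SIZE - 2, &mut buf).is_err());
    }
}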