cherry-pick virtio improvement from lab8-rv32

Move mandelbrot to test mod, clean up virtio feature negotiation, and add addr2line script to Makefile

Implement full virtqueue operations, virtio input driver

Use new virtqueue implementation for virtio net driver

Implement virtio blk device support and use it instead of memory sfs when available

fix dirty hack for virtio_blk
master
Jiajie Chen 6 years ago committed by WangRunji
parent c7d736acd4
commit 8313b8854e

@ -45,11 +45,20 @@ pub trait PageTable {
/// When `vaddr` is not mapped, or is mapped but not present, map it to `paddr`.
///
/// Returns `true` if a new mapping was installed, `false` if a present
/// mapping already existed. (This hunk contained both the pre- and
/// post-change bodies interleaved; this is the merged post-change logic.)
fn map_if_not_exists(&mut self, vaddr: VirtAddr, paddr: usize) -> bool {
    match self.get_entry(vaddr) {
        // An entry exists and is present: leave it alone.
        Some(page_table_entry) if page_table_entry.present() => false,
        // No entry, or a non-present entry: install the mapping.
        _ => {
            self.map(vaddr, paddr);
            true
        }
    }
}

@ -26,10 +26,12 @@ sv39 = []
board_u540 = ["sv39"]
# (for aarch64 RaspberryPi3)
nographic = []
board_raspi3 = ["bcm2837"]
board_raspi3 = ["bcm2837", "link_user"]
# (for riscv64)
board_k210 = ["m_mode"]
raspi3_use_generic_timer = ["bcm2837/use_generic_timer"]
# Hard link user program
link_user = []
[profile.dev]
# MUST >= 1 : Enable RVO to avoid stack overflow

@ -77,7 +77,9 @@ qemu_opts += \
else ifeq ($(arch), riscv32)
qemu_opts += \
-machine virt \
-kernel $(bin)
-kernel $(bin) \
-drive file=$(SFSIMG),format=raw,id=sfs \
-device virtio-blk-device,drive=sfs
ifdef m_mode
qemu_opts += -cpu rv32imacu-nommu
endif
@ -85,7 +87,9 @@ endif
else ifeq ($(arch), riscv64)
qemu_opts += \
-machine virt \
-kernel $(bin)
-kernel $(bin) \
-drive file=$(SFSIMG),format=raw,id=sfs \
-device virtio-blk-device,drive=sfs
ifdef m_mode
qemu_opts += -cpu rv64imacu-nommu
endif
@ -93,7 +97,7 @@ endif
else ifeq ($(arch), aarch64)
qemu_opts += \
-machine $(board) \
-serial null \
-serial null -serial mon:stdio \
-kernel $(bin)
endif
@ -103,8 +107,6 @@ endif
ifeq ($(graphic), off)
qemu_opts += -nographic
else
qemu_opts += -serial stdio
endif
### build args ###
@ -192,6 +194,7 @@ justrunnet: build
justrunui: build
@qemu-system-$(arch) $(qemu_opts) \
-device virtio-gpu-device \
-device virtio-mouse-device
debug: $(kernel) $(bin)
@qemu-system-$(arch) $(qemu_opts) -s -S &

@ -0,0 +1 @@
pub mod virtio_blk;

@ -0,0 +1,184 @@
use alloc::prelude::*;
use alloc::sync::Arc;
use alloc::vec;
use core::cmp::min;
use core::fmt;
use core::mem::{size_of, zeroed};
use core::slice;
use bitflags::*;
use device_tree::Node;
use device_tree::util::SliceRead;
use log::*;
use rcore_memory::PAGE_SIZE;
use rcore_memory::paging::PageTable;
use volatile::Volatile;
use simple_filesystem::BlockedDevice;
use crate::arch::cpu;
use crate::memory::active_table;
use crate::sync::SpinNoIrqLock as Mutex;
use super::super::{DeviceType, Driver, DRIVERS};
use super::super::bus::virtio_mmio::*;
/// Per-device state for a virtio block device (legacy MMIO transport).
pub struct VirtIOBlk {
    interrupt_parent: u32,  // "interrupt-parent" phandle from the device tree
    interrupt: u32,         // interrupt number from the device tree
    header: usize,          // guest-physical address of the VirtIOHeader MMIO region
    queue: VirtIOVirtqueue, // the single request queue (queue 0)
    capacity: usize         // device capacity in 512-byte sectors
}
/// Cloneable handle to a block device; all access is serialized by the lock.
#[derive(Clone)]
pub struct VirtIOBlkDriver(Arc<Mutex<VirtIOBlk>>);
/// Device-specific configuration space layout for virtio-blk (legacy).
#[repr(C)]
#[derive(Debug)]
struct VirtIOBlkConfig {
    capacity: Volatile<u64>, // number of 512 sectors
}
/// Request header placed first in every virtio-blk descriptor chain.
#[repr(C)]
#[derive(Default)]
struct VirtIOBlkReq {
    req_type: u32, // VIRTIO_BLK_T_IN (read) or VIRTIO_BLK_T_OUT (write)
    reserved: u32, // must be zero
    sector: u64,   // starting sector, in 512-byte units
}
/// Device-written completion: one block of data followed by a status byte.
#[repr(C)]
struct VirtIOBlkResp {
    data: [u8; VIRTIO_BLK_BLK_SIZE],
    status: u8 // one of the VIRTIO_BLK_S_* values
}
const VIRTIO_BLK_T_IN: u32 = 0;    // request type: read from device
const VIRTIO_BLK_T_OUT: u32 = 1;   // request type: write to device
const VIRTIO_BLK_S_OK: u8 = 0;     // status: request completed successfully
const VIRTIO_BLK_S_IOERR: u8 = 1;  // status: device or driver error
const VIRTIO_BLK_S_UNSUPP: u8 = 2; // status: request unsupported by device
const VIRTIO_BLK_BLK_SIZE: usize = 512; // virtio-blk sector size in bytes
bitflags! {
    /// virtio-blk feature bits plus the device-independent reserved
    /// feature bits shared by all virtio devices.
    struct VirtIOBlkFeature : u64 {
        const BARRIER = 1 << 0;
        const SIZE_MAX = 1 << 1;
        const SEG_MAX = 1 << 2;
        const GEOMETRY = 1 << 4;
        const RO = 1 << 5;
        const BLK_SIZE = 1 << 6;
        const SCSI = 1 << 7;
        const FLUSH = 1 << 9;
        const TOPOLOGY = 1 << 10;
        const CONFIG_WCE = 1 << 11;
        const DISCARD = 1 << 13;
        const WRITE_ZEROES = 1 << 14;
        // device independent
        const NOTIFY_ON_EMPTY = 1 << 24; // legacy
        const ANY_LAYOUT = 1 << 27; // legacy
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const UNUSED = 1 << 30; // legacy
        const VERSION_1 = 1 << 32; // detect legacy
        const ACCESS_PLATFORM = 1 << 33; // since virtio v1.1
        const RING_PACKED = 1 << 34; // since virtio v1.1
        const IN_ORDER = 1 << 35; // since virtio v1.1
        const ORDER_PLATFORM = 1 << 36; // since virtio v1.1
        const SR_IOV = 1 << 37; // since virtio v1.1
        const NOTIFICATION_DATA = 1 << 38; // since virtio v1.1
    }
}
impl Driver for VirtIOBlkDriver {
    /// Check and acknowledge a pending interrupt from this device.
    /// Returns true iff this device had raised the interrupt.
    fn try_handle_interrupt(&mut self) -> bool {
        let mut driver = self.0.lock();

        // ensure header page is mapped
        active_table().map_if_not_exists(driver.header as usize, driver.header as usize);

        let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
        let interrupt = header.interrupt_status.read();
        if interrupt != 0 {
            // write-to-ack: echo the status bits back to the device
            header.interrupt_ack.write(interrupt);
            debug!("Got interrupt {:?}", interrupt);
            return true;
        }
        return false;
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Block
    }
}
impl BlockedDevice for VirtIOBlkDriver {
    const BLOCK_SIZE_LOG2: u8 = 9; // 512

    /// Synchronously read one 512-byte block into `buf` (busy-waits for
    /// completion). Returns true iff the device reported VIRTIO_BLK_S_OK.
    fn read_at(&mut self, block_id: usize, buf: &mut [u8]) -> bool {
        let mut driver = self.0.lock();

        // ensure header page is mapped
        active_table().map_if_not_exists(driver.header as usize, driver.header as usize);

        // build the device-readable request header
        let mut req = VirtIOBlkReq::default();
        req.req_type = VIRTIO_BLK_T_IN;
        req.reserved = 0;
        req.sector = block_id as u64;
        // NOTE(review): `input` is a stack buffer that the device DMA-writes
        // into; this only works because we busy-wait below while it stays
        // alive and in place — confirm before making this path asynchronous.
        let input = [0; size_of::<VirtIOBlkResp>()];
        let output = unsafe { slice::from_raw_parts(&req as *const VirtIOBlkReq as *const u8, size_of::<VirtIOBlkReq>()) };
        driver.queue.add_and_notify(&[&input], &[output], 0);
        // spin until the device marks the request as used
        driver.queue.get_block();
        let resp = unsafe { &*(&input as *const u8 as *const VirtIOBlkResp) };
        if resp.status == VIRTIO_BLK_S_OK {
            // copy at most one block into the caller's buffer
            let len = min(buf.len(), VIRTIO_BLK_BLK_SIZE);
            buf[..len].clone_from_slice(&resp.data[..len]);
            true
        } else {
            false
        }
    }

    /// Writes are not implemented yet.
    fn write_at(&mut self, block_id: usize, buf: &[u8]) -> bool {
        unimplemented!()
    }
}
/// Probe and register a virtio-blk device described by device-tree `node`:
/// negotiate features, read its capacity, set up its request queue, and
/// push the driver into the global DRIVERS list.
pub fn virtio_blk_init(node: &Node) {
    let reg = node.prop_raw("reg").unwrap();
    let from = reg.as_slice().read_be_u64(0).unwrap();
    let header = unsafe { &mut *(from as *mut VirtIOHeader) };

    header.status.write(VirtIODeviceStatus::DRIVER.bits());

    let device_features_bits = header.read_device_features();
    let device_features = VirtIOBlkFeature::from_bits_truncate(device_features_bits);
    info!("Device features {:?}", device_features);

    // negotiate these flags only
    let supported_features = VirtIOBlkFeature::empty();
    let driver_features = (device_features & supported_features).bits();
    header.write_driver_features(driver_features);

    // read configuration space
    let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOBlkConfig) };
    info!("Config: {:?}", config);
    // capacity is in 512-byte sectors, so halve it for KB
    info!("Found a block device of size {}KB", config.capacity.read() / 2);

    // virtio 4.2.4 Legacy interface
    // configure the single request virtqueue (queue 0)
    header.guest_page_size.write(PAGE_SIZE as u32); // one page

    let mut driver = VirtIOBlkDriver(Arc::new(Mutex::new(VirtIOBlk {
        interrupt: node.prop_u32("interrupts").unwrap(),
        interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
        header: from as usize,
        queue: VirtIOVirtqueue::new(header, 0, 16), // queue 0, 16 descriptors
        capacity: config.capacity.read() as usize,
    })));

    header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());

    DRIVERS.lock().push(Box::new(driver));
}

@ -1,15 +1,24 @@
use alloc::{vec, vec::Vec};
use alloc::alloc::{GlobalAlloc, Layout};
use core::mem::size_of;
use core::slice;
use core::sync::atomic::{fence, Ordering};
use bitflags::*;
use device_tree::Node;
use device_tree::util::SliceRead;
use log::*;
use rcore_memory::PAGE_SIZE;
use rcore_memory::paging::PageTable;
use volatile::{ReadOnly, Volatile, WriteOnly};
use crate::arch::memory;
use crate::HEAP_ALLOCATOR;
use crate::memory::active_table;
use super::super::block::virtio_blk;
use super::super::gpu::virtio_gpu;
use super::super::input::virtio_input;
use super::super::net::virtio_net;
// virtio 4.2.4 Legacy interface
@ -53,6 +62,212 @@ pub struct VirtIOHeader {
config_generation: ReadOnly<u32>
}
/// Driver-side bookkeeping for one legacy (split) virtqueue.
#[repr(C)]
pub struct VirtIOVirtqueue {
    header: usize,        // address of the device's VirtIOHeader
    queue_address: usize, // base of the contiguous queue allocation
    queue_num: usize,     // number of descriptors (power of two)
    queue: usize,         // queue index on the device
    desc: usize, // *mut VirtIOVirtqueueDesc,
    avail: usize, // *mut VirtIOVirtqueueAvailableRing,
    used: usize, // *mut VirtIOVirtqueueUsedRing,
    desc_state: Vec<usize>, // per-chain user_data, indexed by descriptor head id
    num_used: usize,  // descriptors currently in flight
    free_head: usize, // head of the free descriptor list
    avail_idx: u16,     // next available-ring index to publish (wraps at 2^16)
    last_used_idx: u16, // next used-ring index to consume (wraps at 2^16)
}
impl VirtIOVirtqueue {
    /// Initialize virtqueue `queue` of the device behind `header` with
    /// `queue_num` descriptors (must be a power of two), using the legacy
    /// contiguous desc/avail/used layout (virtio spec 2.4.2) and handing
    /// the device its guest page frame number.
    pub fn new(header: &mut VirtIOHeader, queue: usize, queue_num: usize) -> VirtIOVirtqueue {
        header.queue_sel.write(queue as u32);
        assert_eq!(header.queue_pfn.read(), 0); // not in use
        let queue_num_max = header.queue_num_max.read();
        assert!(queue_num_max >= queue_num as u32); // queue available
        assert!(queue_num & (queue_num - 1) == 0); // power of two
        let align = PAGE_SIZE;
        let size = virtqueue_size(queue_num, align);
        assert!(size % align == 0);
        // alloc continuous pages
        let address = unsafe {
            HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, align).unwrap())
        } as usize;

        header.queue_num.write(queue_num as u32);
        header.queue_align.write(align as u32);
        header.queue_pfn.write((address as u32) >> 12);

        // chain all descriptors into one initial free list starting at 0
        let desc = unsafe { slice::from_raw_parts_mut(address as *mut VirtIOVirtqueueDesc, queue_num) };
        for i in 0..(queue_num - 1) {
            desc[i].next.write((i + 1) as u16);
        }

        VirtIOVirtqueue {
            header: header as *mut VirtIOHeader as usize,
            queue_address: address,
            queue_num,
            queue,
            desc: address,
            avail: address + size_of::<VirtIOVirtqueueDesc>() * queue_num,
            used: address + virtqueue_used_elem_offset(queue_num, align),
            desc_state: vec![0; queue_num],
            num_used: 0,
            free_head: 0,
            avail_idx: 0,
            last_used_idx: 0,
        }
    }

    /// Whether there are enough free descriptors for a chain of
    /// `input_len + output_len` buffers.
    pub fn can_add(&self, input_len: usize, output_len: usize) -> bool {
        return input_len + output_len + self.num_used <= self.queue_num;
    }

    // Add buffers to the virtqueue: `output` buffers are device-readable,
    // `input` buffers are device-writable; `user_data` is an opaque token
    // handed back by get() when the chain completes.
    // Return true on success, false otherwise
    // ref. linux virtio_ring.c virtqueue_add
    pub fn add(&mut self, input: &[&[u8]], output: &[&[u8]], user_data: usize) -> bool {
        assert!(input.len() + output.len() > 0);
        if !self.can_add(input.len(), output.len()) {
            return false;
        }

        let desc = unsafe { slice::from_raw_parts_mut(self.desc as *mut VirtIOVirtqueueDesc, self.queue_num) };
        let head = self.free_head;
        let mut prev = 0;
        let mut cur = self.free_head;
        // device-readable buffers first, then device-writable ones
        for i in 0..output.len() {
            desc[cur].flags.write(VirtIOVirtqueueFlag::NEXT.bits());
            desc[cur].addr.write(output[i].as_ptr() as u64);
            desc[cur].len.write(output[i].len() as u32);
            prev = cur;
            cur = desc[cur].next.read() as usize;
        }
        for i in 0..input.len() {
            desc[cur].flags.write((VirtIOVirtqueueFlag::NEXT | VirtIOVirtqueueFlag::WRITE).bits());
            desc[cur].addr.write(input[i].as_ptr() as u64);
            desc[cur].len.write(input[i].len() as u32);
            prev = cur;
            cur = desc[cur].next.read() as usize;
        }
        // terminate the chain on its last descriptor
        desc[prev].flags.write(desc[prev].flags.read() & !(VirtIOVirtqueueFlag::NEXT.bits()));

        self.num_used += input.len() + output.len();
        self.free_head = cur;
        // record user_data under the chain's head id *before* publishing, so
        // a completion observed by get() always finds it
        self.desc_state[head] = user_data;

        let avail = unsafe { &mut *(self.avail as *mut VirtIOVirtqueueAvailableRing) };
        let avail_slot = self.avail_idx as usize & (self.queue_num - 1);
        avail.ring[avail_slot].write(head as u16);

        // write barrier: the chain must be visible before the index bump
        fence(Ordering::SeqCst);

        // BUGFIX: ring indices are free-running u16 counters that wrap per
        // the virtio spec; a plain `+= 1` panics on overflow in debug builds
        self.avail_idx = self.avail_idx.wrapping_add(1);
        avail.idx.write(self.avail_idx);
        return true;
    }

    // Add buffers to the virtqueue and notify device about it
    pub fn add_and_notify(&mut self, input: &[&[u8]], output: &[&[u8]], user_data: usize) -> bool {
        let res = self.add(input, output, user_data);
        if res {
            self.notify();
        }
        return res;
    }

    /// Whether the device has completed chains we have not yet consumed.
    pub fn can_get(&self) -> bool {
        let used = unsafe { &mut *(self.used as *mut VirtIOVirtqueueUsedRing) };
        return self.last_used_idx != used.idx.read();
    }

    // Get device used buffers (input, output, length, user_data)
    // ref. linux virtio_ring.c virtqueue_get_buf_ctx
    pub fn get(&mut self) -> Option<(Vec<&'static [u8]>, Vec<&'static [u8]>, usize, usize)> {
        let used = unsafe { &mut *(self.used as *mut VirtIOVirtqueueUsedRing) };
        if self.last_used_idx == used.idx.read() {
            return None
        }
        // read barrier: read the used element only after the index
        fence(Ordering::SeqCst);

        let last_used_slot = self.last_used_idx as usize & (self.queue_num - 1);
        let index = used.ring[last_used_slot].id.read() as usize;
        let len = used.ring[last_used_slot].len.read();

        // BUGFIX: add() stores user_data under the descriptor *head* id, so
        // it must be looked up via `index` (the id the device reports), not
        // via the used-ring slot — the two differ once the ring wraps or
        // completions arrive out of order.
        let user_data = self.desc_state[index];
        self.desc_state[index] = 0;

        // walk the chain, collecting buffers and freeing descriptors
        let mut cur = index;
        let desc = unsafe { slice::from_raw_parts_mut(self.desc as *mut VirtIOVirtqueueDesc, self.queue_num) };
        let mut input = Vec::new();
        let mut output = Vec::new();
        loop {
            let flags = VirtIOVirtqueueFlag::from_bits_truncate(desc[cur].flags.read());
            let buffer = unsafe { slice::from_raw_parts(desc[cur].addr.read() as *const u8, desc[cur].len.read() as usize) };
            if flags.contains(VirtIOVirtqueueFlag::WRITE) {
                input.push(buffer);
            } else {
                output.push(buffer);
            }
            self.num_used -= 1;
            if flags.contains(VirtIOVirtqueueFlag::NEXT) {
                cur = desc[cur].next.read() as usize;
            } else {
                // last descriptor: splice the whole chain back onto the free list
                desc[cur].next.write(self.free_head as u16);
                break
            }
        }
        self.free_head = index;
        // wrapping: see add()
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Some((input, output, len as usize, user_data))
    }

    // Get device used buffers until succeed, busy-waiting
    // See get() above
    pub fn get_block(&mut self) -> (Vec<&'static [u8]>, Vec<&'static [u8]>, usize, usize) {
        loop {
            if let Some(res) = self.get() {
                return res;
            }
        }
    }

    // Notify device about new buffers
    pub fn notify(&mut self) {
        let header = unsafe { &mut *(self.header as *mut VirtIOHeader) };
        header.queue_notify.write(self.queue as u32);
    }
}
/// Offset of the device-specific configuration space from the MMIO base.
pub const VIRTIO_CONFIG_SPACE_OFFSET: u64 = 0x100;
impl VirtIOHeader {
    /// Read the full 64-bit device feature word via the legacy banked
    /// registers: bank 0 holds bits [0, 32), bank 1 holds bits [32, 64).
    pub fn read_device_features(&mut self) -> u64 {
        self.device_features_sel.write(0); // device features [0, 32)
        let low = self.device_features.read() as u64;
        self.device_features_sel.write(1); // device features [32, 64)
        let high = self.device_features.read() as u64;
        (high << 32) | low
    }

    /// Write the full 64-bit driver (guest) feature word, one 32-bit bank
    /// at a time.
    pub fn write_driver_features(&mut self, driver_features: u64) {
        self.driver_features_sel.write(0); // driver features [0, 32)
        self.driver_features.write(driver_features as u32);
        self.driver_features_sel.write(1); // driver features [32, 64)
        self.driver_features.write((driver_features >> 32) as u32);
    }
}
bitflags! {
pub struct VirtIODeviceStatus : u32 {
const ACKNOWLEDGE = 1;
@ -87,7 +302,7 @@ pub struct VirtIOVirtqueueAvailableRing {
pub flags: Volatile<u16>,
pub idx: Volatile<u16>,
pub ring: [Volatile<u16>; 32], // actual size: queue_size
used_event: Volatile<u16>
used_event: Volatile<u16> // unused
}
#[repr(C)]
@ -103,7 +318,7 @@ pub struct VirtIOVirtqueueUsedRing {
pub flags: Volatile<u16>,
pub idx: Volatile<u16>,
pub ring: [VirtIOVirtqueueUsedElem; 32], // actual size: queue_size
avail_event: Volatile<u16>
avail_event: Volatile<u16> // unused
}
// virtio 2.4.2 Legacy Interfaces: A Note on Virtqueue Layout
@ -121,8 +336,9 @@ pub fn virtio_probe(node: &Node) {
let from = reg.as_slice().read_be_u64(0).unwrap();
let size = reg.as_slice().read_be_u64(8).unwrap();
// assuming one page
assert_eq!(size as usize, PAGE_SIZE);
active_table().map(from as usize, from as usize);
let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
let magic = header.magic.read();
let version = header.version.read();
let device_id = header.device_id.read();
@ -135,8 +351,12 @@ pub fn virtio_probe(node: &Node) {
header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits());
if device_id == 1 { // net device
virtio_net::virtio_net_init(node);
} else if device_id == 2 { // blk device
virtio_blk::virtio_blk_init(node);
} else if device_id == 16 { // gpu device
virtio_gpu::virtio_gpu_init(node);
} else if device_id == 18 { // input device
virtio_input::virtio_input_init(node);
} else {
println!("Unrecognized virtio device {}", device_id);
}

@ -1 +1,2 @@
pub mod virtio_gpu;
mod test;

@ -0,0 +1,59 @@
use core::slice;
/// Convert an HSV color (`h` in degrees, `s`/`v` in [0, 1]) to an RGB
/// triple in [0, 1]. Formulas follow the Wikipedia HSV-to-RGB derivation.
fn hsv_to_rgb(h: u32, s: f32, v: f32) -> (f32, f32, f32) {
    // Sector of the color wheel (0..=5) and position within the sector.
    let sector = (h / 60) % 6;
    let frac = (h % 60) as f32 / 60.0;
    // The three candidate channel values for this sector.
    let low = v * (1.0 - s);
    let falling = v * (1.0 - frac * s);
    let rising = v * (1.0 - (1.0 - frac) * s);
    match sector {
        0 => (v, rising, low),
        1 => (falling, v, low),
        2 => (low, v, rising),
        3 => (low, falling, v),
        4 => (rising, low, v),
        5 => (v, low, falling),
        _ => unreachable!()
    }
}
/// Render an escape-time Mandelbrot image into `frame_buffer`.
///
/// The caller must guarantee `frame_buffer` is valid for `width * height`
/// u32 writes; a mutable slice is built over it below. Pixel layout is
/// blue in bits 16..24, green in 8..16, red in 0..8.
pub fn mandelbrot(width: u32, height: u32, frame_buffer: *mut u32) {
    // size in bytes; divided back by 4 below for the u32 element count
    let size = width * height * 4;
    let frame_buffer_data = unsafe {
        slice::from_raw_parts_mut(frame_buffer as *mut u32, (size / 4) as usize)
    };
    for x in 0..width {
        for y in 0..height {
            let index = y * width + x;
            // map the pixel to the complex plane, centered on screen
            let scale = 5e-3;
            let xx = (x as f32 - width as f32 / 2.0) * scale;
            let yy = (y as f32 - height as f32 / 2.0) * scale;
            let mut re = xx as f32;
            let mut im = yy as f32;
            // escape-time iteration: z <- z^2 + c, capped at 60 rounds
            let mut iter: u32 = 0;
            loop {
                iter = iter + 1;
                let new_re = re * re - im * im + xx as f32;
                let new_im = re * im * 2.0 + yy as f32;
                if new_re * new_re + new_im * new_im > 1e3 {
                    break;
                }
                re = new_re;
                im = new_im;
                if iter == 60 {
                    break;
                }
            }
            // color by iteration count via HSV hue (iter * 6 spans 0..360)
            iter = iter * 6;
            let (r, g, b) = hsv_to_rgb(iter, 1.0, 0.5);
            let rr = (r * 256.0) as u32;
            let gg = (g * 256.0) as u32;
            let bb = (b * 256.0) as u32;
            let color = (bb << 16) | (gg << 8) | rr;
            frame_buffer_data[index as usize] = color;
        }
        println!("working on x {}/{}", x, width);
    }
}

@ -17,6 +17,7 @@ use crate::memory::active_table;
use super::super::{DeviceType, Driver, DRIVERS};
use super::super::bus::virtio_mmio::*;
use super::test::mandelbrot;
const VIRTIO_GPU_EVENT_DISPLAY : u32 = 1 << 0;
@ -24,13 +25,10 @@ struct VirtIOGpu {
interrupt_parent: u32,
interrupt: u32,
header: usize,
// 0 for transmit, 1 for cursor
queue_num: u32,
queue_address: usize,
queue_page: [usize; 2],
last_used_idx: u16,
queue_buffer: [usize; 2],
frame_buffer: usize,
rect: VirtIOGpuRect
rect: VirtIOGpuRect,
queues: [VirtIOVirtqueue; 2]
}
#[repr(C)]
@ -46,7 +44,7 @@ bitflags! {
const VIRGL = 1 << 0;
const EDID = 1 << 1;
// device independent
const NOFIFY_ON_EMPTY = 1 << 24; // legacy
const NOTIFY_ON_EMPTY = 1 << 24; // legacy
const ANY_LAYOUT = 1 << 27; // legacy
const RING_INDIRECT_DESC = 1 << 28;
const RING_EVENT_IDX = 1 << 29;
@ -180,7 +178,10 @@ struct VirtIOGpuResourceFlush {
}
const VIRTIO_QUEUE_TRANSMIT: usize = 0;
const VIRTIO_QUEUE_RECEIVE: usize = 1;
const VIRTIO_QUEUE_CURSOR: usize = 1;
const VIRTIO_BUFFER_TRANSMIT: usize = 0;
const VIRTIO_BUFFER_RECEIVE: usize = 1;
const VIRTIO_GPU_RESOURCE_ID: u32 = 0xbabe;
@ -194,13 +195,11 @@ impl Driver for VirtIOGpu {
// ensure header page is mapped
active_table().map_if_not_exists(self.header as usize, self.header as usize);
let mut header = unsafe { &mut *(self.header as *mut VirtIOHeader) };
let header = unsafe { &mut *(self.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
header.interrupt_ack.write(interrupt);
debug!("Got interrupt {:?}", interrupt);
let response = unsafe { &mut *(self.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
debug!("response in interrupt: {:?}", response);
return true;
}
return false;
@ -211,51 +210,24 @@ impl Driver for VirtIOGpu {
}
}
fn setup_rings(driver: &mut VirtIOGpu) {
let mut ring = unsafe {
&mut *((driver.queue_address + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
};
// re-add two buffers to desc
// chaining read buffer and write buffer into one desc
for buffer in 0..2 {
let mut desc = unsafe { &mut *(driver.queue_address as *mut VirtIOVirtqueueDesc).add(buffer) };
desc.addr.write(driver.queue_page[buffer] as u64);
desc.len.write(PAGE_SIZE as u32);
if buffer == VIRTIO_QUEUE_TRANSMIT {
// device readable
desc.flags.write(VirtIOVirtqueueFlag::NEXT.bits());
desc.next.write(1);
} else {
// device writable
desc.flags.write(VirtIOVirtqueueFlag::WRITE.bits());
}
ring.ring[buffer].write(0);
}
}
fn notify_device(driver: &mut VirtIOGpu) {
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let mut ring = unsafe {
&mut *((driver.queue_address + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
};
ring.idx.write(ring.idx.read() + 1);
header.queue_notify.write(0);
/// Submit one GPU command: queue the transmit page (device-readable) and
/// the receive page (device-writable) on the control queue and notify.
fn request(driver: &mut VirtIOGpu) {
    let input = unsafe { slice::from_raw_parts(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *const u8, PAGE_SIZE) };
    let output = unsafe { slice::from_raw_parts(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *const u8, PAGE_SIZE) };
    driver.queues[VIRTIO_QUEUE_TRANSMIT].add_and_notify(&[input], &[output], 0);
}
fn setup_framebuffer(driver: &mut VirtIOGpu) {
// get display info
setup_rings(driver);
let mut request_get_display_info = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuCtrlHdr) };
let request_get_display_info = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuCtrlHdr) };
*request_get_display_info = VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
notify_device(driver);
let response_get_display_info = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuRespDisplayInfo) };
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_get_display_info = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuRespDisplayInfo) };
info!("response: {:?}", response_get_display_info);
driver.rect = response_get_display_info.rect;
// create resource 2d
setup_rings(driver);
let mut request_resource_create_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceCreate2D) };
let request_resource_create_2d = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuResourceCreate2D) };
*request_resource_create_2d = VirtIOGpuResourceCreate2D {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
resource_id: VIRTIO_GPU_RESOURCE_ID,
@ -263,8 +235,9 @@ fn setup_framebuffer(driver: &mut VirtIOGpu) {
width: response_get_display_info.rect.width,
height: response_get_display_info.rect.height
};
notify_device(driver);
let response_resource_create_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_resource_create_2d = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
info!("response: {:?}", response_resource_create_2d);
// alloc continuous pages for the frame buffer
@ -274,8 +247,7 @@ fn setup_framebuffer(driver: &mut VirtIOGpu) {
} as usize;
mandelbrot(driver.rect.width, driver.rect.height, frame_buffer as *mut u32);
driver.frame_buffer = frame_buffer;
setup_rings(driver);
let mut request_resource_attach_backing = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceAttachBacking) };
let request_resource_attach_backing = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuResourceAttachBacking) };
*request_resource_attach_backing = VirtIOGpuResourceAttachBacking {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
resource_id: VIRTIO_GPU_RESOURCE_ID,
@ -284,88 +256,30 @@ fn setup_framebuffer(driver: &mut VirtIOGpu) {
length: size,
padding: 0
};
notify_device(driver);
let response_resource_attach_backing = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
debug!("response: {:?}", response_resource_attach_backing);
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_resource_attach_backing = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
info!("response: {:?}", response_resource_attach_backing);
// map frame buffer to screen
setup_rings(driver);
let mut request_set_scanout = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuSetScanout) };
let request_set_scanout = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuSetScanout) };
*request_set_scanout = VirtIOGpuSetScanout {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_SET_SCANOUT),
rect: response_get_display_info.rect,
scanout_id: 0,
resource_id: VIRTIO_GPU_RESOURCE_ID
};
notify_device(driver);
let response_set_scanout = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_set_scanout = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
info!("response: {:?}", response_set_scanout);
flush_frame_buffer_to_screen(driver);
}
// from Wikipedia
fn hsv_to_rgb(h: u32, s: f32, v: f32) -> (f32, f32, f32) {
let hi = (h / 60) % 6;
let f = (h % 60) as f32 / 60.0;
let p = v * (1.0 - s);
let q = v * (1.0 - f * s);
let t = v * (1.0 - (1.0 - f) * s);
match hi {
0 => (v, t, p),
1 => (q, v, p),
2 => (p, v, t),
3 => (p, q, v),
4 => (t, p, v),
5 => (v, p, q),
_ => unreachable!()
}
}
fn mandelbrot(width: u32, height: u32, frame_buffer: *mut u32) {
let size = width * height * 4;
let frame_buffer_data = unsafe {
slice::from_raw_parts_mut(frame_buffer as *mut u32, (size / 4) as usize)
};
for x in 0..width {
for y in 0..height {
let index = y * width + x;
let scale = 5e-3;
let xx = (x as f32 - width as f32 / 2.0) * scale;
let yy = (y as f32 - height as f32 / 2.0) * scale;
let mut re = xx as f32;
let mut im = yy as f32;
let mut iter: u32 = 0;
loop {
iter = iter + 1;
let new_re = re * re - im * im + xx as f32;
let new_im = re * im * 2.0 + yy as f32;
if new_re * new_re + new_im * new_im > 1e3 {
break;
}
re = new_re;
im = new_im;
if iter == 60 {
break;
}
}
iter = iter * 6;
let (r, g, b) = hsv_to_rgb(iter, 1.0, 0.5);
let rr = (r * 256.0) as u32;
let gg = (g * 256.0) as u32;
let bb = (b * 256.0) as u32;
let color = (bb << 16) | (gg << 8) | rr;
frame_buffer_data[index as usize] = color;
}
println!("working on x {}/{}", x, width);
}
}
fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
// copy data from guest to host
setup_rings(driver);
let mut request_transfer_to_host_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuTransferToHost2D) };
let request_transfer_to_host_2d = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuTransferToHost2D) };
*request_transfer_to_host_2d = VirtIOGpuTransferToHost2D {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
rect: driver.rect,
@ -373,49 +287,43 @@ fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
resource_id: VIRTIO_GPU_RESOURCE_ID,
padding: 0
};
notify_device(driver);
let response_transfer_to_host_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_transfer_to_host_2d = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
info!("response: {:?}", response_transfer_to_host_2d);
// flush data to screen
setup_rings(driver);
let mut request_resource_flush = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceFlush) };
let request_resource_flush = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_TRANSMIT] as *mut VirtIOGpuResourceFlush) };
*request_resource_flush = VirtIOGpuResourceFlush {
header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
rect: driver.rect,
resource_id: VIRTIO_GPU_RESOURCE_ID,
padding: 0
};
notify_device(driver);
let response_resource_flush = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
request(driver);
driver.queues[VIRTIO_QUEUE_TRANSMIT].get_block();
let response_resource_flush = unsafe { &mut *(driver.queue_buffer[VIRTIO_BUFFER_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
info!("response: {:?}", response_resource_flush);
}
pub fn virtio_gpu_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
let mut device_features_bits: u64;
header.device_features_sel.write(0); // device features [0, 32)
device_features_bits = header.device_features.read().into();
header.device_features_sel.write(1); // device features [32, 64)
device_features_bits = device_features_bits + ((header.device_features.read() as u64) << 32);
let device_features_bits = header.read_device_features();
let device_features = VirtIOGpuFeature::from_bits_truncate(device_features_bits);
info!("Device features {:?}", device_features);
// negotiate these flags only
let supported_features = VirtIOGpuFeature::empty();
let driver_features = (device_features & supported_features).bits();
header.driver_features_sel.write(0); // driver features [0, 32)
header.driver_features.write((driver_features & 0xFFFFFFFF) as u32);
header.driver_features_sel.write(1); // driver features [32, 64)
header.driver_features.write(((driver_features & 0xFFFFFFFF00000000) >> 32) as u32);
header.write_driver_features(driver_features);
// read configuration space
let mut config = unsafe { &mut *((from + 0x100) as *mut VirtIOGpuConfig) };
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOGpuConfig) };
info!("Config: {:?}", config);
// virtio 4.2.4 Legacy interface
@ -427,48 +335,22 @@ pub fn virtio_gpu_init(node: &Node) {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
queue_num,
queue_address: 0,
queue_page: [0, 0],
last_used_idx: 0,
queue_buffer: [0, 0],
frame_buffer: 0,
rect: VirtIOGpuRect::default()
rect: VirtIOGpuRect::default(),
queues: [VirtIOVirtqueue::new(header, VIRTIO_QUEUE_TRANSMIT, queue_num),
VirtIOVirtqueue::new(header, VIRTIO_QUEUE_CURSOR, queue_num)]
};
// 0 for control, 1 for cursor, we use controlq only
for queue in 0..2 {
header.queue_sel.write(queue);
assert_eq!(header.queue_pfn.read(), 0); // not in use
// 0 for transmit, 1 for receive
let queue_num_max = header.queue_num_max.read();
assert!(queue_num_max >= queue_num); // queue available
let size = virtqueue_size(queue_num as usize, PAGE_SIZE);
assert!(size % PAGE_SIZE == 0);
// alloc continuous pages
let address = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, PAGE_SIZE).unwrap())
for buffer in 0..2 {
// allocate a page for each buffer
let page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
debug!("queue {} using page address {:#X} with size {}", queue, address as usize, size);
header.queue_num.write(queue_num);
header.queue_align.write(PAGE_SIZE as u32);
header.queue_pfn.write((address as u32) >> 12);
if queue == 0 {
driver.queue_address = address;
// 0 for transmit, 1 for receive
for buffer in 0..2 {
// allocate a page for each buffer
let page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
driver.queue_page[buffer as usize] = page;
debug!("buffer {} using page address {:#X}", buffer, page as usize);
}
}
header.queue_notify.write(queue);
driver.queue_buffer[buffer as usize] = page;
debug!("buffer {} using page address {:#X}", buffer, page as usize);
}
header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());
setup_framebuffer(&mut driver);

@ -0,0 +1 @@
pub mod virtio_input;

@ -0,0 +1,210 @@
use alloc::prelude::*;
use alloc::vec;
use core::fmt;
use core::mem::size_of;
use core::mem::transmute_copy;
use core::slice;
use bitflags::*;
use device_tree::Node;
use device_tree::util::SliceRead;
use log::*;
use rcore_memory::PAGE_SIZE;
use rcore_memory::paging::PageTable;
use volatile::Volatile;
use crate::arch::cpu;
use crate::memory::active_table;
use super::super::{DeviceType, Driver, DRIVERS};
use super::super::bus::virtio_mmio::*;
/// Per-device state for a virtio input device (e.g. mouse).
struct VirtIOInput {
    interrupt_parent: u32, // "interrupt-parent" phandle from the device tree
    interrupt: u32,        // interrupt number from the device tree
    header: usize,         // guest-physical address of the VirtIOHeader MMIO region
    // 0 for event, 1 for status
    queues: [VirtIOVirtqueue; 2],
    x: isize, // accumulated pointer x position
    y: isize, // accumulated pointer y position
}
// virtio-input configuration `select` values (what to expose in `data`)
const VIRTIO_INPUT_CFG_UNSET: u8 = 0x00;
const VIRTIO_INPUT_CFG_ID_NAME: u8 = 0x01;
const VIRTIO_INPUT_CFG_ID_SERIAL: u8 = 0x02;
const VIRTIO_INPUT_CFG_ID_DEVIDS: u8 = 0x03;
const VIRTIO_INPUT_CFG_PROP_BITS: u8 = 0x10;
const VIRTIO_INPUT_CFG_EV_BITS: u8 = 0x11;
const VIRTIO_INPUT_CFG_ABS_INFO: u8 = 0x12;
/// Layout of the virtio input device's configuration space.
/// The driver writes `select`/`subsel` and the device fills `size`/`data`.
#[repr(C)]
#[derive(Debug)]
struct VirtIOInputConfig {
    select: Volatile<u8>,
    subsel: Volatile<u8>,
    // number of valid bytes in `data` for the current selection
    // NOTE(review): `size` and `data` are device-written; consider Volatile — confirm
    size: u8,
    // padding; "reversed" is presumably a typo for "reserved" — never read
    reversed: [u8; 5],
    data: [u8; 32]
}
/// Absolute-axis information reported via config space
/// (selected with VIRTIO_INPUT_CFG_ABS_INFO); mirrors Linux `input_absinfo`.
#[repr(C)]
#[derive(Debug)]
struct VirtIOInputAbsInfo {
    min: u32,
    max: u32,
    fuzz: u32,
    flat: u32,
    res: u32
}
/// Device identity reported via config space
/// (selected with VIRTIO_INPUT_CFG_ID_DEVIDS); mirrors Linux `input_id`.
#[repr(C)]
#[derive(Debug)]
struct VirtIOInputDevIDs {
    bustype: u16,
    vendor: u16,
    product: u16,
    version: u16
}
/// One input event as delivered by the device through the event queue.
/// Field meanings follow the Linux evdev convention (type, code, value).
#[repr(C)]
#[derive(Clone, Default)]
struct VirtIOInputEvent {
    event_type: u16,
    code: u16,
    value: u32
}

impl fmt::Display for VirtIOInputEvent {
    /// Render the event using Linux event-code names where known.
    // linux event codes
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match (self.event_type, self.code) {
            // EV_SYN
            (0, 0) => write!(f, "SYN_REPORT"),
            (0, code) => write!(f, "Unknown SYN code {}", code),
            // EV_REL
            (2, 0) => write!(f, "REL_X {}", self.value),
            (2, 1) => write!(f, "REL_Y {}", self.value),
            (2, code) => write!(f, "Unknown REL code {}", code),
            (event_type, _) => write!(f, "Unknown event type {}", event_type),
        }
    }
}
bitflags! {
    /// Virtio feature bits advertised by the input device.
    /// All bits listed here are device-independent transport/ring features;
    /// the input device class defines no device-specific features.
    struct VirtIOInputFeature : u64 {
        // device independent
        const NOTIFY_ON_EMPTY = 1 << 24; // legacy
        const ANY_LAYOUT = 1 << 27; // legacy
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const UNUSED = 1 << 30; // legacy
        const VERSION_1 = 1 << 32; // detect legacy
        const ACCESS_PLATFORM = 1 << 33; // since virtio v1.1
        const RING_PACKED = 1 << 34; // since virtio v1.1
        const IN_ORDER = 1 << 35; // since virtio v1.1
        const ORDER_PLATFORM = 1 << 36; // since virtio v1.1
        const SR_IOV = 1 << 37; // since virtio v1.1
        const NOTIFICATION_DATA = 1 << 38; // since virtio v1.1
    }
}

// virtqueue indices: queue 0 carries device->driver input events,
// queue 1 carries driver->device status (see "0 for event, 1 for status")
const VIRTIO_QUEUE_EVENT: usize = 0;
const VIRTIO_QUEUE_STATUS: usize = 1;
impl Driver for VirtIOInput {
    /// Acknowledge and service a pending interrupt from this device.
    ///
    /// Returns `true` when the interrupt belonged to this device (its
    /// interrupt-status register was non-zero), `false` otherwise.
    fn try_handle_interrupt(&mut self) -> bool {
        // for simplicity, only service virtio interrupts on CPU 0
        if cpu::id() > 0 {
            return false
        }

        // ensure header page is mapped before touching MMIO registers
        active_table().map_if_not_exists(self.header as usize, self.header as usize);

        let header = unsafe { &mut *(self.header as *mut VirtIOHeader) };
        let interrupt = header.interrupt_status.read();
        if interrupt != 0 {
            header.interrupt_ack.write(interrupt);
            debug!("Got interrupt {:?}", interrupt);
            // drain every completed buffer from the event queue
            while let Some((input, output, _, _)) = self.queues[VIRTIO_QUEUE_EVENT].get() {
                let event: VirtIOInputEvent = unsafe { transmute_copy(&input[0][0]) };
                // EV_REL values are signed 32-bit (Linux evdev convention);
                // cast through i32 so negative movements sign-extend
                // correctly on 64-bit targets (`u32 as isize` would
                // zero-extend and turn -1 into a huge positive delta).
                if event.event_type == 2 && event.code == 0 {
                    // X axis
                    self.x += event.value as i32 as isize;
                } else if event.event_type == 2 && event.code == 1 {
                    // Y axis
                    self.y += event.value as i32 as isize;
                }
                trace!("got {}", event);
                // hand the buffer back to the device for the next event
                self.queues[VIRTIO_QUEUE_EVENT].add(&input, &output, 0);
            }
            println!("mouse is at x {} y {}", self.x, self.y);
            return true;
        }
        return false;
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Input
    }
}
/// Probe and initialize a virtio input device found in the device tree,
/// then register it with the global driver list.
///
/// Follows the legacy virtio-mmio init sequence: set DRIVER status,
/// negotiate features, read config space, set up the virtqueues, pre-post
/// event buffers, and finally set DRIVER_OK. The order of MMIO writes
/// matters and must not be rearranged.
pub fn virtio_input_init(node: &Node) {
    // "reg" holds the MMIO base address of the device (big-endian u64)
    let reg = node.prop_raw("reg").unwrap();
    let from = reg.as_slice().read_be_u64(0).unwrap();
    let header = unsafe { &mut *(from as *mut VirtIOHeader) };

    header.status.write(VirtIODeviceStatus::DRIVER.bits());

    let device_features_bits = header.read_device_features();
    let device_features = VirtIOInputFeature::from_bits_truncate(device_features_bits);
    println!("Device features {:?}", device_features);

    // negotiate these flags only
    let supported_features = VirtIOInputFeature::empty();
    let driver_features = (device_features & supported_features).bits();
    header.write_driver_features(driver_features);

    // read configuration space
    let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIOInputConfig) };
    info!("Config: {:?}", config);

    // virtio 4.2.4 Legacy interface
    // configure two virtqueues: ingress and egress
    header.guest_page_size.write(PAGE_SIZE as u32); // one page

    // 32 descriptors per queue
    let queue_num = 32;
    let mut driver = VirtIOInput {
        interrupt: node.prop_u32("interrupts").unwrap(),
        interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
        header: from as usize,
        queues: [VirtIOVirtqueue::new(header, VIRTIO_QUEUE_EVENT, queue_num),
                VirtIOVirtqueue::new(header, VIRTIO_QUEUE_STATUS, queue_num)],
        x: 0,
        y: 0
    };

    // pre-post one device-writable buffer per descriptor on the event queue;
    // the backing storage is leaked on purpose — it must live as long as the
    // device may DMA into it
    let buffer = vec![VirtIOInputEvent::default(); queue_num];
    let input_buffers: &mut [VirtIOInputEvent] = Box::leak(buffer.into_boxed_slice());
    for i in 0..queue_num {
        let buffer = unsafe { slice::from_raw_parts((&input_buffers[i]) as *const VirtIOInputEvent as *const u8, size_of::<VirtIOInputEvent>()) };
        driver.queues[VIRTIO_QUEUE_EVENT].add(&[buffer], &[], 0);
    }

    // device is live from this point on
    // NOTE(review): spec says status writes should accumulate
    // (ACKNOWLEDGE | DRIVER | ... | DRIVER_OK); this writes each stage's bits
    // alone, matching the other drivers in this file — confirm against device
    header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());
    DRIVERS.lock().push(Box::new(driver));
}

@ -9,14 +9,18 @@ use crate::sync::SpinNoIrqLock;
mod device_tree;
pub mod bus;
pub mod net;
pub mod block;
mod gpu;
mod input;
pub enum DeviceType {
Net,
Gpu
Gpu,
Input,
Block
}
pub trait Driver : Send {
pub trait Driver : Send + AsAny {
// if interrupt belongs to this driver, handle it and return true
// return false otherwise
fn try_handle_interrupt(&mut self) -> bool;
@ -25,7 +29,7 @@ pub trait Driver : Send {
fn device_type(&self) -> DeviceType;
}
pub trait NetDriver: Driver + AsAny {
pub trait NetDriver: Driver {
// get mac address for this device
fn get_mac(&self) -> EthernetAddress;

@ -31,11 +31,8 @@ pub struct VirtIONet {
interrupt: u32,
header: usize,
mac: EthernetAddress,
queue_num: u32,
// 0 for receive, 1 for transmit
queue_address: [usize; 2],
queue_page: [usize; 2],
last_used_idx: [u16; 2],
queues: [VirtIOVirtqueue; 2],
}
#[derive(Clone)]
@ -51,33 +48,17 @@ impl Driver for VirtIONetDriver {
return false
}
let mut driver = self.0.lock();
let driver = self.0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let interrupt = header.interrupt_status.read();
if interrupt != 0 {
header.interrupt_ack.write(interrupt);
let interrupt_status = VirtIONetworkInterruptStatus::from_bits_truncate(interrupt);
debug!("Got interrupt {:?}", interrupt_status);
if interrupt_status.contains(VirtIONetworkInterruptStatus::USED_RING_UPDATE) {
// need to change when queue_num is larger than one
let queue = VIRTIO_QUEUE_TRANSMIT;
let used_ring_offset = virtqueue_used_elem_offset(driver.queue_num as usize, PAGE_SIZE);
let mut used_ring = unsafe {
&mut *((driver.queue_address[queue] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
};
if driver.last_used_idx[queue] < used_ring.idx.read() {
assert_eq!(driver.last_used_idx[queue], used_ring.idx.read() - 1);
info!("Processing queue {} from {} to {}", queue, driver.last_used_idx[queue], used_ring.idx.read());
driver.last_used_idx[queue] = used_ring.idx.read();
}
} else if interrupt_status.contains(VirtIONetworkInterruptStatus::CONFIGURATION_CHANGE) {
// TODO: update mac and status
unimplemented!("virtio-net configuration change not implemented");
}
return true;
} else {
@ -92,22 +73,12 @@ impl Driver for VirtIONetDriver {
impl VirtIONet {
fn transmit_available(&self) -> bool {
let used_ring_offset = virtqueue_used_elem_offset(self.queue_num as usize, PAGE_SIZE);
let mut used_ring = unsafe {
&mut *((self.queue_address[VIRTIO_QUEUE_TRANSMIT] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
};
let result = self.last_used_idx[VIRTIO_QUEUE_TRANSMIT] == used_ring.idx.read();
result
self.queues[VIRTIO_QUEUE_TRANSMIT].can_add(1, 0)
}
fn receive_available(&self) -> bool {
let used_ring_offset = virtqueue_used_elem_offset(self.queue_num as usize, PAGE_SIZE);
let mut used_ring = unsafe {
&mut *((self.queue_address[VIRTIO_QUEUE_RECEIVE] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
};
let result = self.last_used_idx[VIRTIO_QUEUE_RECEIVE] < used_ring.idx.read();
result
self.queues[VIRTIO_QUEUE_RECEIVE].can_get()
}
}
@ -132,7 +103,7 @@ impl<'a> phy::Device<'a> for VirtIONetDriver {
fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
let driver = self.0.lock();
if driver.transmit_available() && driver.receive_available() {
// ugly borrow rules bypass
// potential racing
Some((VirtIONetRxToken(self.clone()),
VirtIONetTxToken(self.clone())))
} else {
@ -158,36 +129,22 @@ impl<'a> phy::Device<'a> for VirtIONetDriver {
}
impl phy::RxToken for VirtIONetRxToken {
fn consume<R, F>(self, timestamp: Instant, f: F) -> Result<R>
fn consume<R, F>(self, _timestamp: Instant, f: F) -> Result<R>
where F: FnOnce(&[u8]) -> Result<R>
{
let buffer = {
let (input, output, _, user_data) = {
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let used_ring_offset = virtqueue_used_elem_offset(driver.queue_num as usize, PAGE_SIZE);
let mut used_ring = unsafe {
&mut *((driver.queue_address[VIRTIO_QUEUE_RECEIVE] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
};
assert!(driver.last_used_idx[VIRTIO_QUEUE_RECEIVE] == used_ring.idx.read() - 1);
driver.last_used_idx[VIRTIO_QUEUE_RECEIVE] = used_ring.idx.read();
let mut payload = unsafe { slice::from_raw_parts_mut((driver.queue_page[VIRTIO_QUEUE_RECEIVE] + size_of::<VirtIONetHeader>()) as *mut u8, PAGE_SIZE - 10)};
let buffer = payload.to_vec();
for i in 0..(PAGE_SIZE - size_of::<VirtIONetHeader>()) {
payload[i] = 0;
}
let mut ring = unsafe {
&mut *((driver.queue_address[VIRTIO_QUEUE_RECEIVE] + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
};
ring.idx.write(ring.idx.read() + 1);
header.queue_notify.write(VIRTIO_QUEUE_RECEIVE as u32);
buffer
driver.queues[VIRTIO_QUEUE_RECEIVE].get().unwrap()
};
f(&buffer)
let result = f(&input[0][size_of::<VirtIONetHeader>()..]);
let mut driver = (self.0).0.lock();
driver.queues[VIRTIO_QUEUE_RECEIVE].add_and_notify(&input, &output, user_data);
result
}
}
@ -195,32 +152,28 @@ impl phy::TxToken for VirtIONetTxToken {
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> Result<R>
where F: FnOnce(&mut [u8]) -> Result<R>,
{
let mut driver = (self.0).0.lock();
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let output = {
let mut driver = (self.0).0.lock();
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let payload_target = unsafe { slice::from_raw_parts_mut((driver.queue_page[VIRTIO_QUEUE_TRANSMIT] + size_of::<VirtIONetHeader>()) as *mut u8, len)};
let result = f(payload_target);
let mut net_header = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIONetHeader) };
// ensure header page is mapped
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
let mut ring = unsafe {
&mut *((driver.queue_address[VIRTIO_QUEUE_TRANSMIT] + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
if let Some((_, output, _, _)) = driver.queues[VIRTIO_QUEUE_TRANSMIT].get() {
unsafe { slice::from_raw_parts_mut(output[0].as_ptr() as *mut u8, output[0].len())}
} else {
// allocate a page for buffer
let page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
unsafe { slice::from_raw_parts_mut(page as *mut u8, PAGE_SIZE) }
}
};
let output_buffer = &mut output[size_of::<VirtIONetHeader>()..(size_of::<VirtIONetHeader>() +len)];
let result = f(output_buffer);
println!("output {:?}", output_buffer);
// re-add buffer to desc
let mut desc = unsafe { &mut *(driver.queue_address[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOVirtqueueDesc) };
desc.addr.write(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as u64);
desc.len.write((len + size_of::<VirtIONetHeader>()) as u32);
// memory barrier
fence(Ordering::SeqCst);
// add desc to available ring
ring.idx.write(ring.idx.read() + 1);
header.queue_notify.write(VIRTIO_QUEUE_TRANSMIT as u32);
let mut driver = (self.0).0.lock();
assert!(driver.queues[VIRTIO_QUEUE_TRANSMIT].add_and_notify(&[], &[output], 0));
result
}
}
@ -296,108 +249,49 @@ struct VirtIONetHeader {
pub fn virtio_net_init(node: &Node) {
let reg = node.prop_raw("reg").unwrap();
let from = reg.as_slice().read_be_u64(0).unwrap();
let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };
let header = unsafe { &mut *(from as *mut VirtIOHeader) };
header.status.write(VirtIODeviceStatus::DRIVER.bits());
let mut device_features_bits: u64;
header.device_features_sel.write(0); // device features [0, 32)
device_features_bits = header.device_features.read().into();
header.device_features_sel.write(1); // device features [32, 64)
device_features_bits = device_features_bits + ((header.device_features.read() as u64) << 32);
let device_features_bits = header.read_device_features();
let device_features = VirtIONetFeature::from_bits_truncate(device_features_bits);
debug!("Device features {:?}", device_features);
// negotiate these flags only
let supported_features = VirtIONetFeature::MAC | VirtIONetFeature::STATUS;
let driver_features = (device_features & supported_features).bits();
header.driver_features_sel.write(0); // driver features [0, 32)
header.driver_features.write((driver_features & 0xFFFFFFFF) as u32);
header.driver_features_sel.write(1); // driver features [32, 64)
header.driver_features.write(((driver_features & 0xFFFFFFFF00000000) >> 32) as u32);
header.write_driver_features(driver_features);
// read configuration space
let mut mac: [u8; 6];
let mut status: VirtIONetworkStatus;
let mut config = unsafe { &mut *((from + 0x100) as *mut VirtIONetworkConfig) };
mac = config.mac;
status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
let config = unsafe { &mut *((from + VIRTIO_CONFIG_SPACE_OFFSET) as *mut VirtIONetworkConfig) };
let mac = config.mac;
let status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
debug!("Got MAC address {:?} and status {:?}", mac, status);
// virtio 4.2.4 Legacy interface
// configure two virtqueues: ingress and egress
header.guest_page_size.write(PAGE_SIZE as u32); // one page
let queue_num = 1; // for simplicity
let queue_num = 2; // for simplicity
let mut driver = VirtIONet {
interrupt: node.prop_u32("interrupts").unwrap(),
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
header: from as usize,
mac: EthernetAddress(mac),
queue_num: queue_num,
queue_address: [0, 0],
queue_page: [0, 0],
last_used_idx: [0, 0],
queues: [VirtIOVirtqueue::new(header, VIRTIO_QUEUE_RECEIVE, queue_num),
VirtIOVirtqueue::new(header, VIRTIO_QUEUE_TRANSMIT, queue_num)],
};
// 0 for receive, 1 for transmit
for queue in 0..2 {
header.queue_sel.write(queue as u32);
assert_eq!(header.queue_pfn.read(), 0); // not in use
let queue_num_max = header.queue_num_max.read();
assert!(queue_num_max >= queue_num); // queue available
let size = virtqueue_size(queue_num as usize, PAGE_SIZE);
assert!(size % PAGE_SIZE == 0);
// alloc continuous pages
let address = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, PAGE_SIZE).unwrap())
} as usize;
driver.queue_address[queue] = address;
debug!("queue {} using page address {:#X} with size {}", queue, address as usize, size);
header.queue_num.write(queue_num);
header.queue_align.write(PAGE_SIZE as u32);
header.queue_pfn.write((address as u32) >> 12);
// allocate a page for buffer
let page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
driver.queue_page[queue] = page;
// fill first desc
let mut desc = unsafe { &mut *(address as *mut VirtIOVirtqueueDesc) };
desc.addr.write(page as u64);
desc.len.write(PAGE_SIZE as u32);
if queue == VIRTIO_QUEUE_RECEIVE {
// device writable
desc.flags.write(VirtIOVirtqueueFlag::WRITE.bits());
} else if queue == VIRTIO_QUEUE_TRANSMIT {
// driver readable
desc.flags.write(0);
}
// memory barrier
fence(Ordering::SeqCst);
if queue == VIRTIO_QUEUE_RECEIVE {
// add the desc to the ring
let mut ring = unsafe {
&mut *((address + size_of::<VirtIOVirtqueueDesc>() * queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
};
ring.ring[0].write(0);
// wait for first packet
ring.idx.write(ring.idx.read() + 1);
}
// notify device about the new buffer
header.queue_notify.write(queue as u32);
debug!("queue {} using page address {:#X}", queue, page);
}
// allocate a page for buffer
let page = unsafe {
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
} as usize;
let input = unsafe { slice::from_raw_parts(page as *const u8, PAGE_SIZE) };
driver.queues[VIRTIO_QUEUE_RECEIVE].add_and_notify(&[input], &[], 0);
header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());
let mut net_driver = VirtIONetDriver(Arc::new(Mutex::new(driver)));
let net_driver = VirtIONetDriver(Arc::new(Mutex::new(driver)));
DRIVERS.lock().push(Box::new(net_driver.clone()));
NET_DRIVERS.lock().push(Box::new(net_driver));

@ -1,25 +1,40 @@
use simple_filesystem::*;
use alloc::{boxed::Box, sync::Arc, string::String, collections::VecDeque, vec::Vec};
use core::any::Any;
use core::ops::Deref;
use lazy_static::lazy_static;
#[cfg(target_arch = "x86_64")]
use crate::arch::driver::ide;
use crate::sync::Condvar;
use crate::sync::SpinNoIrqLock as Mutex;
use crate::drivers::{self, AsAny};
use crate::drivers::block::virtio_blk::VirtIOBlkDriver;
lazy_static! {
pub static ref ROOT_INODE: Arc<INode> = {
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64", target_arch = "aarch64"))]
#[cfg(not(feature = "link_user"))]
let device = {
#[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
{
Box::new(drivers::DRIVERS.lock().iter()
.map(|device| device.deref().as_any().downcast_ref::<VirtIOBlkDriver>())
.find(|maybe_blk| maybe_blk.is_some())
.expect("VirtIOBlk not found")
.unwrap().clone())
}
#[cfg(target_arch = "x86_64")]
{
Box::new(ide::IDE::new(1))
}
};
#[cfg(feature = "link_user")]
let device = {
extern {
fn _user_img_start();
fn _user_img_end();
}
// Hard link user program
Box::new(unsafe { MemBuf::new(_user_img_start, _user_img_end) })
};
#[cfg(target_arch = "x86_64")]
let device = Box::new(ide::IDE::new(1));
let sfs = SimpleFileSystem::open(device).expect("failed to open SFS");
sfs.root_inode()

@ -17,7 +17,7 @@ pub extern fn server(_arg: usize) -> ! {
}
}
let mut driver = {
let driver = {
let ref_driver = &mut *NET_DRIVERS.lock()[0];
ref_driver.as_any().downcast_ref::<VirtIONetDriver>().unwrap().clone()
};
@ -60,7 +60,7 @@ pub extern fn server(_arg: usize) -> ! {
}
let client = match socket.recv() {
Ok((data, endpoint)) => {
Ok((_, endpoint)) => {
Some(endpoint)
}
Err(_) => None

Loading…
Cancel
Save