cherry-pick jiegec's work (#3) from lab8-rv32: virtio drivers, network stack
Read and parse dtb upon boot. Implement virtio net device detection. Negotiate feature bits for virtio-net. Read MAC address from virtio-net device and detect virtqueues. Receiving from virtio net device is working for the first time. Implement driver interface and interrupt handling routines. Rearrange drivers into upper directory. Implement initial support for processing arp request and reply packet. Refactor MAC address and IPv4 address into structs, and implement ICMP echo reply. Fix typos. Implement initial support for virtio gpu driver. Complete first working version of virtio gpu driver with mandelbrot example. Use smoltcp and implement a udp and tcp server on top of it. Cleanup virtio net code.
parent
353850f41d
commit
047f4ffdcc
@ -0,0 +1 @@
|
||||
pub mod virtio_mmio;
|
@ -0,0 +1,147 @@
|
||||
use core::mem::size_of;
|
||||
|
||||
use bitflags::*;
|
||||
use device_tree::Node;
|
||||
use device_tree::util::SliceRead;
|
||||
use log::*;
|
||||
use rcore_memory::paging::PageTable;
|
||||
use volatile::{ReadOnly, Volatile, WriteOnly};
|
||||
|
||||
use crate::memory::active_table;
|
||||
|
||||
use super::super::gpu::virtio_gpu;
|
||||
use super::super::net::virtio_net;
|
||||
|
||||
// virtio 4.2.4 Legacy interface
//
// MMIO register block of a legacy (version 1) virtio device. An instance of
// this struct is overlaid directly onto the device's MMIO window, so field
// order and packing must match the spec exactly; all access goes through the
// volatile wrappers.
// NOTE(review): #[repr(packed)] together with #[derive(Debug)] creates
// references to possibly-unaligned fields; newer rustc rejects this —
// verify against the toolchain in use.
#[repr(packed)]
#[derive(Debug)]
pub struct VirtIOHeader {
    magic: ReadOnly<u32>, // 0x000, must read 0x74726976 ("virt")
    version: ReadOnly<u32>, // 0x004, 1 = legacy interface
    device_id: ReadOnly<u32>, // 0x008, 0 = no device, 1 = net, 16 = gpu, ...
    vendor_id: ReadOnly<u32>, // 0x00c
    pub device_features: ReadOnly<u32>, // 0x010, half selected by device_features_sel
    pub device_features_sel: WriteOnly<u32>, // 0x014
    __r1: [ReadOnly<u32>; 2], // reserved 0x018-0x01f
    pub driver_features: WriteOnly<u32>, // 0x020, half selected by driver_features_sel
    pub driver_features_sel: WriteOnly<u32>, // 0x024
    pub guest_page_size: WriteOnly<u32>, // 0x028, legacy only
    __r2: ReadOnly<u32>, // reserved 0x02c
    pub queue_sel: WriteOnly<u32>, // 0x030, selects queue for registers below
    pub queue_num_max: ReadOnly<u32>, // 0x034
    pub queue_num: WriteOnly<u32>, // 0x038
    pub queue_align: WriteOnly<u32>, // 0x03c, legacy only
    pub queue_pfn: Volatile<u32>, // 0x040, legacy only: queue base >> guest page shift
    queue_ready: Volatile<u32>, // new interface only
    __r3: [ReadOnly<u32>; 2], // reserved
    pub queue_notify: WriteOnly<u32>, // 0x050, write queue index to kick device
    __r4: [ReadOnly<u32>; 3], // reserved
    pub interrupt_status: ReadOnly<u32>, // 0x060
    pub interrupt_ack: WriteOnly<u32>, // 0x064, write back bits read from status
    __r5: [ReadOnly<u32>; 2], // reserved
    pub status: Volatile<u32>, // 0x070, VirtIODeviceStatus bits
    __r6: [ReadOnly<u32>; 3], // reserved
    queue_desc_low: WriteOnly<u32>, // new interface only since here
    queue_desc_high: WriteOnly<u32>,
    __r7: [ReadOnly<u32>; 2],
    queue_avail_low: WriteOnly<u32>,
    queue_avail_high: WriteOnly<u32>,
    __r8: [ReadOnly<u32>; 2],
    queue_used_low: WriteOnly<u32>,
    queue_used_high: WriteOnly<u32>,
    __r9: [ReadOnly<u32>; 21], // reserved up to config space
    config_generation: ReadOnly<u32>
}
|
||||
|
||||
bitflags! {
    // Device status bits (virtio 2.1), exchanged through `VirtIOHeader::status`
    // during the initialization handshake.
    pub struct VirtIODeviceStatus : u32 {
        const ACKNOWLEDGE = 1; // guest has noticed the device
        const DRIVER = 2; // guest knows how to drive it
        const FAILED = 128; // driver has given up on the device
        const FEATURES_OK = 8; // feature negotiation complete
        const DRIVER_OK = 4; // driver ready; device may be used
        const DEVICE_NEEDS_RESET = 64; // device hit an unrecoverable error
    }
}
|
||||
|
||||
// Legacy virtqueue memory layout (virtio 2.4). These structs are laid over
// guest memory shared with the device, hence packed + volatile access.

// One descriptor-table entry: a guest buffer handed to the device.
#[repr(packed)]
#[derive(Debug)]
pub struct VirtIOVirtqueueDesc {
    pub addr: Volatile<u64>, // guest-physical address of the buffer
    pub len: Volatile<u32>, // buffer length in bytes
    pub flags: Volatile<u16>, // VirtIOVirtqueueFlag bits
    pub next: Volatile<u16> // index of chained descriptor (valid if NEXT set)
}

bitflags! {
    // Descriptor flags (virtio 2.4.5).
    pub struct VirtIOVirtqueueFlag : u16 {
        const NEXT = 1; // chain continues at `next`
        const WRITE = 2; // device writes into this buffer (vs. reads from it)
        const INDIRECT = 4; // buffer holds an indirect descriptor table
    }
}

// Driver -> device ring: descriptor-chain heads the driver offers.
#[repr(packed)]
#[derive(Debug)]
pub struct VirtIOVirtqueueAvailableRing {
    pub flags: Volatile<u16>,
    pub idx: Volatile<u16>, // free-running count of entries ever added
    pub ring: [Volatile<u16>; 32], // actual size: queue_size
    used_event: Volatile<u16> // only meaningful with VIRTIO_F_EVENT_IDX
}

// One used-ring entry: a descriptor chain the device has completed.
#[repr(packed)]
#[derive(Debug)]
pub struct VirtIOVirtqueueUsedElem {
    id: Volatile<u32>, // head index of the completed chain
    len: Volatile<u32> // bytes the device wrote into the buffer
}

// Device -> driver ring: completed descriptor chains.
#[repr(packed)]
#[derive(Debug)]
pub struct VirtIOVirtqueueUsedRing {
    pub flags: Volatile<u16>,
    pub idx: Volatile<u16>, // free-running count of entries ever consumed by device
    pub ring: [VirtIOVirtqueueUsedElem; 32], // actual size: queue_size
    avail_event: Volatile<u16> // only meaningful with VIRTIO_F_EVENT_IDX
}
|
||||
|
||||
// virtio 2.4.2 Legacy Interfaces: A Note on Virtqueue Layout
|
||||
pub fn virtqueue_size(num: usize, align: usize) -> usize {
|
||||
(((size_of::<VirtIOVirtqueueDesc>() * num + size_of::<u16>() * (3 + num)) + align) & !(align-1)) +
|
||||
(((size_of::<u16>() * 3 + size_of::<VirtIOVirtqueueUsedElem>() * num) + align) & !(align-1))
|
||||
}
|
||||
|
||||
pub fn virtqueue_used_elem_offset(num: usize, align: usize) -> usize {
|
||||
((size_of::<VirtIOVirtqueueDesc>() * num + size_of::<u16>() * (3 + num)) + align) & !(align-1)
|
||||
}
|
||||
|
||||
pub fn virtio_probe(node: &Node) {
|
||||
if let Some(reg) = node.prop_raw("reg") {
|
||||
let from = reg.as_slice().read_be_u64(0).unwrap();
|
||||
let size = reg.as_slice().read_be_u64(8).unwrap();
|
||||
// assuming one page
|
||||
active_table().map(from as usize, from as usize);
|
||||
let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };
|
||||
let magic = header.magic.read();
|
||||
let version = header.version.read();
|
||||
let device_id = header.device_id.read();
|
||||
// only support legacy device
|
||||
if magic == 0x74726976 && version == 1 && device_id != 0 { // "virt" magic
|
||||
info!("Detected virtio net device with vendor id {:#X}", header.vendor_id.read());
|
||||
info!("Device tree node {:?}", node);
|
||||
// virtio 3.1.1 Device Initialization
|
||||
header.status.write(0);
|
||||
header.status.write(VirtIODeviceStatus::ACKNOWLEDGE.bits());
|
||||
if device_id == 1 { // net device
|
||||
virtio_net::virtio_net_init(node);
|
||||
} else if device_id == 16 { // gpu device
|
||||
virtio_gpu::virtio_gpu_init(node);
|
||||
} else {
|
||||
println!("Unrecognized virtio device {}", device_id);
|
||||
}
|
||||
} else {
|
||||
active_table().unmap(from as usize);
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,36 @@
|
||||
use core::slice;
|
||||
|
||||
use device_tree::{DeviceTree, Node};
|
||||
|
||||
use super::bus::virtio_mmio::virtio_probe;
|
||||
|
||||
const DEVICE_TREE_MAGIC: u32 = 0xd00dfeed;
|
||||
|
||||
fn walk_dt_node(dt: &Node) {
|
||||
if let Ok(compatible) = dt.prop_str("compatible") {
|
||||
// TODO: query this from table
|
||||
if compatible == "virtio,mmio" {
|
||||
virtio_probe(dt);
|
||||
}
|
||||
}
|
||||
for child in dt.children.iter() {
|
||||
walk_dt_node(child);
|
||||
}
|
||||
}
|
||||
|
||||
// Minimal view of the flattened-device-tree header: only the fields we need.
// Both fields are stored big-endian in memory.
struct DtbHeader {
    magic: u32, // must equal DEVICE_TREE_MAGIC after byte-swap
    size: u32, // total size of the blob in bytes
}

// Parse the device tree blob left by the bootloader at address `dtb` and
// probe every node. Silently does nothing if the magic does not match or
// the blob fails to parse.
pub fn init(dtb: usize) {
    let header = unsafe {&*(dtb as *const DtbHeader)};
    let magic = u32::from_be(header.magic);
    if magic == DEVICE_TREE_MAGIC {
        let size = u32::from_be(header.size);
        // assumes the whole blob is already mapped and readable
        let dtb_data = unsafe { slice::from_raw_parts(dtb as *const u8, size as usize) };
        if let Ok(dt) = DeviceTree::load(dtb_data) {
            walk_dt_node(&dt.root);
        }
    }
}
|
@ -0,0 +1 @@
|
||||
pub mod virtio_gpu;
|
@ -0,0 +1,477 @@
|
||||
use alloc::alloc::{GlobalAlloc, Layout};
|
||||
use alloc::prelude::*;
|
||||
use core::mem::size_of;
|
||||
use core::slice;
|
||||
|
||||
use bitflags::*;
|
||||
use device_tree::Node;
|
||||
use device_tree::util::SliceRead;
|
||||
use log::*;
|
||||
use rcore_memory::PAGE_SIZE;
|
||||
use rcore_memory::paging::PageTable;
|
||||
use volatile::{ReadOnly, Volatile, WriteOnly};
|
||||
|
||||
use crate::arch::cpu;
|
||||
use crate::HEAP_ALLOCATOR;
|
||||
use crate::memory::active_table;
|
||||
|
||||
use super::super::{DeviceType, Driver, DRIVERS};
|
||||
use super::super::bus::virtio_mmio::*;
|
||||
|
||||
const VIRTIO_GPU_EVENT_DISPLAY : u32 = 1 << 0;
|
||||
|
||||
// Driver state for one virtio-gpu device.
struct VirtIOGpu {
    interrupt_parent: u32, // interrupt-parent phandle from the DT node
    interrupt: u32, // interrupt number from the DT node
    header: usize, // address of the device's VirtIOHeader MMIO block
    // 0 for transmit, 1 for cursor
    queue_num: u32, // entries per virtqueue
    queue_address: usize, // base address of the control virtqueue
    queue_page: [usize; 2], // one page per direction: [transmit, receive]
    last_used_idx: u16, // last used-ring index consumed by the driver
    frame_buffer: usize, // guest memory backing the scanout
    rect: VirtIOGpuRect // display geometry reported by the device
}

// Device-specific configuration space of virtio-gpu (MMIO offset 0x100).
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuConfig {
    events_read: ReadOnly<u32>, // pending events (VIRTIO_GPU_EVENT_*)
    events_clear: WriteOnly<u32>, // write bits to acknowledge events
    num_scanouts: Volatile<u32> // number of display outputs
}
|
||||
|
||||
bitflags! {
    // Feature bits offered by / negotiated with the virtio-gpu device:
    // gpu-specific bits plus the device-independent bits (virtio 6).
    struct VirtIOGpuFeature : u64 {
        const VIRGL = 1 << 0;
        const EDID = 1 << 1;
        // device independent
        // Fix: constant was misspelled NOFIFY_ON_EMPTY; no other code
        // referenced it, so the rename is safe within this module.
        const NOTIFY_ON_EMPTY = 1 << 24; // legacy
        const ANY_LAYOUT = 1 << 27; // legacy
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const UNUSED = 1 << 30; // legacy
        const VERSION_1 = 1 << 32; // detect legacy
        const ACCESS_PLATFORM = 1 << 33; // since virtio v1.1
        const RING_PACKED = 1 << 34; // since virtio v1.1
        const IN_ORDER = 1 << 35; // since virtio v1.1
        const ORDER_PLATFORM = 1 << 36; // since virtio v1.1
        const SR_IOV = 1 << 37; // since virtio v1.1
        const NOTIFICATION_DATA = 1 << 38; // since virtio v1.1
    }
}
|
||||
|
||||
// virtio-gpu command types (virtio-gpu spec, 2D mode).
// controlq commands
const VIRTIO_GPU_CMD_GET_DISPLAY_INFO : u32 = 0x100;
const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D : u32 = 0x101;
const VIRTIO_GPU_CMD_RESOURCE_UNREF : u32 = 0x102;
const VIRTIO_GPU_CMD_SET_SCANOUT : u32 = 0x103;
const VIRTIO_GPU_CMD_RESOURCE_FLUSH : u32 = 0x104;
const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D : u32 = 0x105;
const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING : u32 = 0x106;
const VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING : u32 = 0x107;
const VIRTIO_GPU_CMD_GET_CAPSET_INFO : u32 = 0x108;
const VIRTIO_GPU_CMD_GET_CAPSET : u32 = 0x109;
const VIRTIO_GPU_CMD_GET_EDID : u32 = 0x10a;

// cursorq commands
const VIRTIO_GPU_CMD_UPDATE_CURSOR : u32 = 0x300;
const VIRTIO_GPU_CMD_MOVE_CURSOR : u32 = 0x301;

// success responses
const VIRTIO_GPU_RESP_OK_NODATA : u32 = 0x1100;
const VIRTIO_GPU_RESP_OK_DISPLAY_INFO : u32 = 0x1101;
const VIRTIO_GPU_RESP_OK_CAPSET_INFO : u32 = 0x1102;
const VIRTIO_GPU_RESP_OK_CAPSET : u32 = 0x1103;
const VIRTIO_GPU_RESP_OK_EDID : u32 = 0x1104;

// error responses
const VIRTIO_GPU_RESP_ERR_UNSPEC : u32 = 0x1200;
const VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY : u32 = 0x1201;
const VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID : u32 = 0x1202;

// ctrl-header flag: fence_id is valid and must be signalled on completion
const VIRTIO_GPU_FLAG_FENCE : u32 = 1 << 0;
|
||||
|
||||
// Common header that starts every virtio-gpu request and response.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuCtrlHdr {
    hdr_type: u32, // VIRTIO_GPU_CMD_* in requests, VIRTIO_GPU_RESP_* in responses
    flags: u32, // VIRTIO_GPU_FLAG_*
    fence_id: u64, // only meaningful when FLAG_FENCE is set
    ctx_id: u32, // 3D rendering context; unused in 2D mode
    padding: u32
}

impl VirtIOGpuCtrlHdr {
    // Build a header for a simple command: no fence, no context.
    fn with_type(hdr_type: u32) -> VirtIOGpuCtrlHdr {
        VirtIOGpuCtrlHdr {
            hdr_type,
            flags: 0,
            fence_id: 0,
            ctx_id: 0,
            padding: 0
        }
    }
}
|
||||
|
||||
// A rectangle in screen coordinates.
#[repr(packed)]
#[derive(Debug, Copy, Clone, Default)]
struct VirtIOGpuRect {
    x: u32,
    y: u32,
    width: u32,
    height: u32
}

// Response to GET_DISPLAY_INFO.
// NOTE(review): the spec response carries one entry per scanout; this struct
// models only the first entry — confirm a single-scanout setup is assumed.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuRespDisplayInfo {
    header: VirtIOGpuCtrlHdr,
    rect: VirtIOGpuRect,
    enabled: u32,
    flags: u32
}

// 32-bit B8G8R8A8 pixel format id understood by the host.
const VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: u32 = 1;

// RESOURCE_CREATE_2D: allocate a host-side 2D resource.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuResourceCreate2D {
    header: VirtIOGpuCtrlHdr,
    resource_id: u32,
    format: u32,
    width: u32,
    height: u32
}

// RESOURCE_ATTACH_BACKING: back a host resource with one guest memory region.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuResourceAttachBacking {
    header: VirtIOGpuCtrlHdr,
    resource_id: u32,
    nr_entries: u32, // always 1
    addr: u64, // guest address of the backing memory
    length: u32, // backing length in bytes
    padding: u32
}

// SET_SCANOUT: bind a resource to a display output.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuSetScanout {
    header: VirtIOGpuCtrlHdr,
    rect: VirtIOGpuRect,
    scanout_id: u32,
    resource_id: u32
}

// TRANSFER_TO_HOST_2D: copy guest backing memory into the host resource.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuTransferToHost2D {
    header: VirtIOGpuCtrlHdr,
    rect: VirtIOGpuRect,
    offset: u64, // byte offset into the backing memory
    resource_id: u32,
    padding: u32
}

// RESOURCE_FLUSH: make the (updated) resource visible on the display.
#[repr(packed)]
#[derive(Debug)]
struct VirtIOGpuResourceFlush {
    header: VirtIOGpuCtrlHdr,
    rect: VirtIOGpuRect,
    resource_id: u32,
    padding: u32
}

// Roles of the two descriptors used on the control queue.
const VIRTIO_QUEUE_TRANSMIT: usize = 0;
const VIRTIO_QUEUE_RECEIVE: usize = 1;

// Arbitrary non-zero id of the single resource this driver creates.
const VIRTIO_GPU_RESOURCE_ID: u32 = 0xbabe;
|
||||
|
||||
impl Driver for VirtIOGpu {
    // Acknowledge a pending interrupt from this gpu device.
    // Returns true iff the device actually had an interrupt pending.
    fn try_handle_interrupt(&mut self) -> bool {
        // for simplicity, only service device interrupts on cpu 0
        if cpu::id() > 0 {
            return false
        }

        // ensure header page is mapped
        active_table().map_if_not_exists(self.header as usize, self.header as usize);

        let mut header = unsafe { &mut *(self.header as *mut VirtIOHeader) };
        let interrupt = header.interrupt_status.read();
        if interrupt != 0 {
            // ack first, then peek at the response left in the receive page
            header.interrupt_ack.write(interrupt);
            debug!("Got interrupt {:?}", interrupt);
            let response = unsafe { &mut *(self.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
            debug!("response in interrupt: {:?}", response);
            return true;
        }
        return false;
    }

    // See DeviceType.
    fn device_type(&self) -> DeviceType {
        DeviceType::Gpu
    }
}
|
||||
|
||||
// Reset the control queue to a known single-request state:
// descriptor 0 = transmit page (device-readable), chained to
// descriptor 1 = receive page (device-writable), with every available-ring
// slot pointing at chain head 0. Called before each command because this
// driver only ever has one request in flight.
fn setup_rings(driver: &mut VirtIOGpu) {
    // the available ring lives immediately after the descriptor table
    let mut ring = unsafe {
        &mut *((driver.queue_address + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
    };

    // re-add two buffers to desc
    // chaining read buffer and write buffer into one desc
    for buffer in 0..2 {
        let mut desc = unsafe { &mut *(driver.queue_address as *mut VirtIOVirtqueueDesc).add(buffer) };
        desc.addr.write(driver.queue_page[buffer] as u64);
        desc.len.write(PAGE_SIZE as u32);
        if buffer == VIRTIO_QUEUE_TRANSMIT {
            // device readable
            desc.flags.write(VirtIOVirtqueueFlag::NEXT.bits());
            desc.next.write(1);
        } else {
            // device writable
            desc.flags.write(VirtIOVirtqueueFlag::WRITE.bits());
        }
        ring.ring[buffer].write(0);
    }
}

// Publish the prepared request: bump the available index and kick queue 0.
// NOTE(review): there is no memory barrier between the ring update and the
// notify write — confirm ordering guarantees on the target platform.
fn notify_device(driver: &mut VirtIOGpu) {
    let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
    let mut ring = unsafe {
        &mut *((driver.queue_address + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
    };
    ring.idx.write(ring.idx.read() + 1);
    header.queue_notify.write(0);
}
|
||||
|
||||
// Bring up the display: query its geometry, create a host 2D resource,
// attach a guest framebuffer to it, bind it to scanout 0, draw a mandelbrot
// test image and flush it to the screen.
// NOTE(review): every response is read immediately after notify_device()
// without waiting for a completion interrupt — this relies on the device
// (QEMU) completing requests synchronously; confirm before reusing.
fn setup_framebuffer(driver: &mut VirtIOGpu) {
    // get display info
    setup_rings(driver);
    let mut request_get_display_info = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuCtrlHdr) };
    *request_get_display_info = VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    notify_device(driver);
    let response_get_display_info = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuRespDisplayInfo) };
    info!("response: {:?}", response_get_display_info);
    driver.rect = response_get_display_info.rect;

    // create resource 2d
    setup_rings(driver);
    let mut request_resource_create_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceCreate2D) };
    *request_resource_create_2d = VirtIOGpuResourceCreate2D {
        header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        resource_id: VIRTIO_GPU_RESOURCE_ID,
        format: VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM,
        width: response_get_display_info.rect.width,
        height: response_get_display_info.rect.height
    };
    notify_device(driver);
    let response_resource_create_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
    info!("response: {:?}", response_resource_create_2d);

    // alloc continuous pages for the frame buffer (4 bytes per pixel)
    let size = response_get_display_info.rect.width * response_get_display_info.rect.height * 4;
    let frame_buffer = unsafe {
        HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size as usize, PAGE_SIZE).unwrap())
    } as usize;
    // pre-fill the framebuffer with a test image before it goes on screen
    mandelbrot(driver.rect.width, driver.rect.height, frame_buffer as *mut u32);
    driver.frame_buffer = frame_buffer;
    setup_rings(driver);
    let mut request_resource_attach_backing = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceAttachBacking) };
    *request_resource_attach_backing = VirtIOGpuResourceAttachBacking {
        header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        resource_id: VIRTIO_GPU_RESOURCE_ID,
        nr_entries: 1,
        addr: frame_buffer as u64,
        length: size,
        padding: 0
    };
    notify_device(driver);
    let response_resource_attach_backing = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
    debug!("response: {:?}", response_resource_attach_backing);

    // map frame buffer to screen
    setup_rings(driver);
    let mut request_set_scanout = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuSetScanout) };
    *request_set_scanout = VirtIOGpuSetScanout {
        header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_SET_SCANOUT),
        rect: response_get_display_info.rect,
        scanout_id: 0,
        resource_id: VIRTIO_GPU_RESOURCE_ID
    };
    notify_device(driver);
    let response_set_scanout = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
    info!("response: {:?}", response_set_scanout);

    flush_frame_buffer_to_screen(driver);
}
|
||||
|
||||
// from Wikipedia
/// Convert an HSV color (hue in degrees, saturation and value in [0, 1])
/// into an (r, g, b) triple with each component in [0, 1].
fn hsv_to_rgb(h: u32, s: f32, v: f32) -> (f32, f32, f32) {
    let sector = (h / 60) % 6;
    let f = (h % 60) as f32 / 60.0;
    let p = v * (1.0 - s);
    let q = v * (1.0 - f * s);
    let t = v * (1.0 - (1.0 - f) * s);
    match sector {
        0 => (v, t, p),
        1 => (q, v, p),
        2 => (p, v, t),
        3 => (p, q, v),
        4 => (t, p, v),
        5 => (v, p, q),
        _ => unreachable!()
    }
}

/// Render a mandelbrot test image into `frame_buffer`, which must point at
/// `width * height` 32-bit pixels laid out row-major as 0x00BBGGRR.
fn mandelbrot(width: u32, height: u32, frame_buffer: *mut u32) {
    let pixels = unsafe {
        slice::from_raw_parts_mut(frame_buffer, (width * height) as usize)
    };
    let scale = 5e-3;
    for x in 0..width {
        for y in 0..height {
            // map the pixel onto a small region around the origin
            let cx = (x as f32 - width as f32 / 2.0) * scale;
            let cy = (y as f32 - height as f32 / 2.0) * scale;
            // escape-time iteration of z -> z^2 + c, capped at 60 rounds
            let mut zr = cx;
            let mut zi = cy;
            let mut rounds: u32 = 0;
            while rounds < 60 {
                rounds = rounds + 1;
                let zr_next = zr * zr - zi * zi + cx;
                let zi_next = zr * zi * 2.0 + cy;
                if zr_next * zr_next + zi_next * zi_next > 1e3 {
                    break; // escaped
                }
                zr = zr_next;
                zi = zi_next;
            }
            // color by (scaled) escape time
            let (r, g, b) = hsv_to_rgb(rounds * 6, 1.0, 0.5);
            let rr = (r * 256.0) as u32;
            let gg = (g * 256.0) as u32;
            let bb = (b * 256.0) as u32;
            pixels[(y * width + x) as usize] = (bb << 16) | (gg << 8) | rr;
        }
        println!("working on x {}/{}", x, width);
    }
}
|
||||
|
||||
// Push the current guest framebuffer to the display: first TRANSFER_TO_HOST_2D
// to copy guest memory into the host resource, then RESOURCE_FLUSH to present.
// NOTE(review): like setup_framebuffer, responses are read without waiting
// for completion — assumes a synchronous device; confirm.
fn flush_frame_buffer_to_screen(driver: &mut VirtIOGpu) {
    // copy data from guest to host
    setup_rings(driver);
    let mut request_transfer_to_host_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuTransferToHost2D) };
    *request_transfer_to_host_2d = VirtIOGpuTransferToHost2D {
        header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        rect: driver.rect,
        offset: 0,
        resource_id: VIRTIO_GPU_RESOURCE_ID,
        padding: 0
    };
    notify_device(driver);
    let response_transfer_to_host_2d = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
    info!("response: {:?}", response_transfer_to_host_2d);

    // flush data to screen
    setup_rings(driver);
    let mut request_resource_flush = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOGpuResourceFlush) };
    *request_resource_flush = VirtIOGpuResourceFlush {
        header: VirtIOGpuCtrlHdr::with_type(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        rect: driver.rect,
        resource_id: VIRTIO_GPU_RESOURCE_ID,
        padding: 0
    };
    notify_device(driver);
    let response_resource_flush = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_RECEIVE] as *mut VirtIOGpuCtrlHdr) };
    info!("response: {:?}", response_resource_flush);
}
|
||||
|
||||
pub fn virtio_gpu_init(node: &Node) {
|
||||
let reg = node.prop_raw("reg").unwrap();
|
||||
let from = reg.as_slice().read_be_u64(0).unwrap();
|
||||
let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };
|
||||
|
||||
header.status.write(VirtIODeviceStatus::DRIVER.bits());
|
||||
|
||||
let mut device_features_bits: u64;
|
||||
header.device_features_sel.write(0); // device features [0, 32)
|
||||
device_features_bits = header.device_features.read().into();
|
||||
header.device_features_sel.write(1); // device features [32, 64)
|
||||
device_features_bits = device_features_bits + ((header.device_features.read() as u64) << 32);
|
||||
let device_features = VirtIOGpuFeature::from_bits_truncate(device_features_bits);
|
||||
info!("Device features {:?}", device_features);
|
||||
|
||||
// negotiate these flags only
|
||||
let supported_features = VirtIOGpuFeature::empty();
|
||||
let driver_features = (device_features & supported_features).bits();
|
||||
header.driver_features_sel.write(0); // driver features [0, 32)
|
||||
header.driver_features.write((driver_features & 0xFFFFFFFF) as u32);
|
||||
header.driver_features_sel.write(1); // driver features [32, 64)
|
||||
header.driver_features.write(((driver_features & 0xFFFFFFFF00000000) >> 32) as u32);
|
||||
|
||||
// read configuration space
|
||||
let mut config = unsafe { &mut *((from + 0x100) as *mut VirtIOGpuConfig) };
|
||||
info!("Config: {:?}", config);
|
||||
|
||||
// virtio 4.2.4 Legacy interface
|
||||
// configure two virtqueues: ingress and egress
|
||||
header.guest_page_size.write(PAGE_SIZE as u32); // one page
|
||||
|
||||
let queue_num = 2;
|
||||
let mut driver = VirtIOGpu {
|
||||
interrupt: node.prop_u32("interrupts").unwrap(),
|
||||
interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
|
||||
header: from as usize,
|
||||
queue_num,
|
||||
queue_address: 0,
|
||||
queue_page: [0, 0],
|
||||
last_used_idx: 0,
|
||||
frame_buffer: 0,
|
||||
rect: VirtIOGpuRect::default()
|
||||
};
|
||||
|
||||
// 0 for control, 1 for cursor, we use controlq only
|
||||
for queue in 0..2 {
|
||||
header.queue_sel.write(queue);
|
||||
assert_eq!(header.queue_pfn.read(), 0); // not in use
|
||||
// 0 for transmit, 1 for receive
|
||||
let queue_num_max = header.queue_num_max.read();
|
||||
assert!(queue_num_max >= queue_num); // queue available
|
||||
let size = virtqueue_size(queue_num as usize, PAGE_SIZE);
|
||||
assert!(size % PAGE_SIZE == 0);
|
||||
// alloc continuous pages
|
||||
let address = unsafe {
|
||||
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, PAGE_SIZE).unwrap())
|
||||
} as usize;
|
||||
|
||||
debug!("queue {} using page address {:#X} with size {}", queue, address as usize, size);
|
||||
|
||||
header.queue_num.write(queue_num);
|
||||
header.queue_align.write(PAGE_SIZE as u32);
|
||||
header.queue_pfn.write((address as u32) >> 12);
|
||||
|
||||
if queue == 0 {
|
||||
driver.queue_address = address;
|
||||
// 0 for transmit, 1 for receive
|
||||
for buffer in 0..2 {
|
||||
// allocate a page for each buffer
|
||||
let page = unsafe {
|
||||
HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
|
||||
} as usize;
|
||||
driver.queue_page[buffer as usize] = page;
|
||||
debug!("buffer {} using page address {:#X}", buffer, page as usize);
|
||||
}
|
||||
}
|
||||
header.queue_notify.write(queue);
|
||||
}
|
||||
header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());
|
||||
|
||||
setup_framebuffer(&mut driver);
|
||||
|
||||
DRIVERS.lock().push(Box::new(driver));
|
||||
}
|
@ -0,0 +1,55 @@
|
||||
use alloc::prelude::*;
|
||||
use core::any::Any;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use smoltcp::wire::EthernetAddress;
|
||||
|
||||
use crate::sync::SpinNoIrqLock;
|
||||
|
||||
mod device_tree;
|
||||
pub mod bus;
|
||||
pub mod net;
|
||||
mod gpu;
|
||||
|
||||
// Coarse classification of a registered driver.
pub enum DeviceType {
    Net,
    Gpu
}

// Common interface implemented by every device driver.
pub trait Driver : Send {
    // if interrupt belongs to this driver, handle it and return true
    // return false otherwise
    fn try_handle_interrupt(&mut self) -> bool;

    // return the correspondent device type, see DeviceType
    fn device_type(&self) -> DeviceType;
}
|
||||
|
||||
// Additional interface implemented by network device drivers.
pub trait NetDriver: Driver + AsAny {
    // get mac address for this device
    fn get_mac(&self) -> EthernetAddress;

    // get interface name for this device
    fn get_ifname(&self) -> String;
}

// little hack, see https://users.rust-lang.org/t/how-to-downcast-from-a-trait-any-to-a-struct/11219/3
// Allows a NetDriver trait object to be downcast to its concrete type.
pub trait AsAny {
    fn as_any(&self) -> &Any;
}

// Blanket impl: any 'static type can expose itself as &Any.
impl<T: Any> AsAny for T {
    fn as_any(&self) -> &Any { self }
}
|
||||
|
||||
lazy_static! {
    // All registered drivers, in probe order.
    pub static ref DRIVERS: SpinNoIrqLock<Vec<Box<Driver>>> = SpinNoIrqLock::new(Vec::new());
}

lazy_static! {
    // Network-capable drivers, kept separately for the network stack.
    pub static ref NET_DRIVERS: SpinNoIrqLock<Vec<Box<NetDriver>>> = SpinNoIrqLock::new(Vec::new());
}

// Driver subsystem entry point: probe every device described by the
// device tree blob located at physical address `dtb`.
pub fn init(dtb: usize) {
    device_tree::init(dtb);
}
|
@ -0,0 +1 @@
|
||||
pub mod virtio_net;
|
@ -0,0 +1,403 @@
|
||||
use alloc::alloc::{GlobalAlloc, Layout};
|
||||
use alloc::format;
|
||||
use alloc::prelude::*;
|
||||
use alloc::sync::Arc;
|
||||
use core::mem::size_of;
|
||||
use core::slice;
|
||||
|
||||
use bitflags::*;
|
||||
use device_tree::Node;
|
||||
use device_tree::util::SliceRead;
|
||||
use log::*;
|
||||
use rcore_memory::PAGE_SIZE;
|
||||
use rcore_memory::paging::PageTable;
|
||||
use smoltcp::phy::{self, DeviceCapabilities};
|
||||
use smoltcp::Result;
|
||||
use smoltcp::time::Instant;
|
||||
use smoltcp::wire::EthernetAddress;
|
||||
use volatile::{ReadOnly, Volatile};
|
||||
|
||||
use crate::arch::cpu;
|
||||
use crate::HEAP_ALLOCATOR;
|
||||
use crate::memory::active_table;
|
||||
use crate::sync::SpinNoIrqLock as Mutex;
|
||||
|
||||
use super::super::{DeviceType, Driver, DRIVERS, NET_DRIVERS, NetDriver};
|
||||
use super::super::bus::virtio_mmio::*;
|
||||
|
||||
// Driver state for one virtio-net device.
pub struct VirtIONet {
    interrupt_parent: u32, // interrupt-parent phandle from the DT node
    interrupt: u32, // interrupt number from the DT node
    header: usize, // address of the device's VirtIOHeader MMIO block
    mac: EthernetAddress, // MAC read from device config space
    queue_num: u32, // entries per virtqueue
    // 0 for receive, 1 for transmit
    queue_address: [usize; 2], // base address of each virtqueue
    queue_page: [usize; 2], // one data page per queue
    last_used_idx: [u16; 2], // last used-ring index consumed, per queue
}

// Cloneable, lock-guarded handle to the driver; this is what smoltcp and
// the interrupt path share.
#[derive(Clone)]
pub struct VirtIONetDriver(Arc<Mutex<VirtIONet>>);

// Virtqueue indices (note: opposite order from the gpu driver).
const VIRTIO_QUEUE_RECEIVE: usize = 0;
const VIRTIO_QUEUE_TRANSMIT: usize = 1;
|
||||
|
||||
impl Driver for VirtIONetDriver {
    // Acknowledge a pending interrupt and advance the transmit used index.
    // Returns true iff this device had an interrupt pending.
    fn try_handle_interrupt(&mut self) -> bool {
        // for simplicity, only service device interrupts on cpu 0
        if cpu::id() > 0 {
            return false
        }

        let mut driver = self.0.lock();

        // ensure header page is mapped
        active_table().map_if_not_exists(driver.header as usize, driver.header as usize);

        let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
        let interrupt = header.interrupt_status.read();
        if interrupt != 0 {
            header.interrupt_ack.write(interrupt);
            let interrupt_status = VirtIONetworkInterruptStatus::from_bits_truncate(interrupt);
            debug!("Got interrupt {:?}", interrupt_status);
            if interrupt_status.contains(VirtIONetworkInterruptStatus::USED_RING_UPDATE) {
                // need to change when queue_num is larger than one
                let queue = VIRTIO_QUEUE_TRANSMIT;
                let used_ring_offset = virtqueue_used_elem_offset(driver.queue_num as usize, PAGE_SIZE);
                let mut used_ring = unsafe {
                    &mut *((driver.queue_address[queue] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
                };
                if driver.last_used_idx[queue] < used_ring.idx.read() {
                    // NOTE(review): this assert panics if the device retires
                    // more than one buffer between interrupts — relies on a
                    // single in-flight transmit; confirm.
                    assert_eq!(driver.last_used_idx[queue], used_ring.idx.read() - 1);
                    info!("Processing queue {} from {} to {}", queue, driver.last_used_idx[queue], used_ring.idx.read());
                    driver.last_used_idx[queue] = used_ring.idx.read();
                }
            } else if interrupt_status.contains(VirtIONetworkInterruptStatus::CONFIGURATION_CHANGE) {
                // TODO: update mac and status
                unimplemented!("virtio-net configuration change not implemented");
            }

            return true;
        } else {
            return false;
        }
    }

    // See DeviceType.
    fn device_type(&self) -> DeviceType {
        DeviceType::Net
    }
}
|
||||
|
||||
impl VirtIONet {
    // True when no transmit buffer is in flight (the driver has consumed
    // every used-ring entry), so a new frame may be queued.
    fn transmit_available(&self) -> bool {
        let used_ring_offset = virtqueue_used_elem_offset(self.queue_num as usize, PAGE_SIZE);
        let mut used_ring = unsafe {
            &mut *((self.queue_address[VIRTIO_QUEUE_TRANSMIT] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
        };
        let result = self.last_used_idx[VIRTIO_QUEUE_TRANSMIT] == used_ring.idx.read();
        result
    }


    // True when the device has returned at least one receive buffer that the
    // driver has not consumed yet (a frame is waiting).
    fn receive_available(&self) -> bool {
        let used_ring_offset = virtqueue_used_elem_offset(self.queue_num as usize, PAGE_SIZE);
        let mut used_ring = unsafe {
            &mut *((self.queue_address[VIRTIO_QUEUE_RECEIVE] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
        };
        let result = self.last_used_idx[VIRTIO_QUEUE_RECEIVE] < used_ring.idx.read();
        result
    }
}
|
||||
|
||||
impl NetDriver for VirtIONetDriver {
    // MAC address read from the device at init time.
    fn get_mac(&self) -> EthernetAddress {
        self.0.lock().mac
    }

    // Interface name; derived from the interrupt number to stay unique.
    fn get_ifname(&self) -> String {
        format!("virtio{}", self.0.lock().interrupt)
    }

}

// smoltcp receive/transmit tokens; each holds a cloned driver handle.
pub struct VirtIONetRxToken(VirtIONetDriver);
pub struct VirtIONetTxToken(VirtIONetDriver);
|
||||
|
||||
impl<'a> phy::Device<'a> for VirtIONetDriver {
    type RxToken = VirtIONetRxToken;
    type TxToken = VirtIONetTxToken;

    // Hand smoltcp a pair of tokens when a frame is waiting AND the single
    // transmit buffer is free (smoltcp may want to reply immediately).
    fn receive(&'a mut self) -> Option<(Self::RxToken, Self::TxToken)> {
        let driver = self.0.lock();
        if driver.transmit_available() && driver.receive_available() {
            // ugly borrow rules bypass: tokens get cloned handles instead of
            // borrowing self
            Some((VirtIONetRxToken(self.clone()),
                VirtIONetTxToken(self.clone())))
        } else {
            None
        }
    }

    // Hand smoltcp a transmit token when the transmit buffer is free.
    fn transmit(&'a mut self) -> Option<Self::TxToken> {
        let driver = self.0.lock();
        if driver.transmit_available() {
            Some(VirtIONetTxToken(self.clone()))
        } else {
            None
        }
    }

    // Link properties advertised to smoltcp: ethernet MTU, one frame at a time.
    fn capabilities(&self) -> DeviceCapabilities {
        let mut caps = DeviceCapabilities::default();
        caps.max_transmission_unit = 1536;
        caps.max_burst_size = Some(1);
        caps
    }
}
|
||||
|
||||
impl phy::RxToken for VirtIONetRxToken {
    // Copy the pending frame out of the receive page, hand the copy to
    // smoltcp's closure, and recycle the page back to the device.
    fn consume<R, F>(self, timestamp: Instant, f: F) -> Result<R>
        where F: FnOnce(&[u8]) -> Result<R>
    {
        let buffer = {
            let mut driver = (self.0).0.lock();

            // ensure header page is mapped
            active_table().map_if_not_exists(driver.header as usize, driver.header as usize);

            let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
            let used_ring_offset = virtqueue_used_elem_offset(driver.queue_num as usize, PAGE_SIZE);
            let mut used_ring = unsafe {
                &mut *((driver.queue_address[VIRTIO_QUEUE_RECEIVE] + used_ring_offset) as *mut VirtIOVirtqueueUsedRing)
            };
            // exactly one frame must be pending (single in-flight buffer)
            assert!(driver.last_used_idx[VIRTIO_QUEUE_RECEIVE] == used_ring.idx.read() - 1);
            driver.last_used_idx[VIRTIO_QUEUE_RECEIVE] = used_ring.idx.read();
            // skip the virtio-net header that precedes the ethernet frame.
            // NOTE(review): the literal `PAGE_SIZE - 10` presumably means
            // `PAGE_SIZE - size_of::<VirtIONetHeader>()` (legacy header is
            // 10 bytes) — confirm and unify with the zeroing loop below.
            let mut payload = unsafe { slice::from_raw_parts_mut((driver.queue_page[VIRTIO_QUEUE_RECEIVE] + size_of::<VirtIONetHeader>()) as *mut u8, PAGE_SIZE - 10)};
            // copy out: the page is handed back to the device right away
            let buffer = payload.to_vec();
            for i in 0..(PAGE_SIZE - size_of::<VirtIONetHeader>()) {
                payload[i] = 0;
            }

            // re-post the buffer and kick the receive queue
            let mut ring = unsafe {
                &mut *((driver.queue_address[VIRTIO_QUEUE_RECEIVE] + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
            };
            ring.idx.write(ring.idx.read() + 1);
            header.queue_notify.write(VIRTIO_QUEUE_RECEIVE as u32);
            buffer
        };
        f(&buffer)
    }
}
|
||||
|
||||
impl phy::TxToken for VirtIONetTxToken {
|
||||
fn consume<R, F>(self, _timestamp: Instant, len: usize, f: F) -> Result<R>
|
||||
where F: FnOnce(&mut [u8]) -> Result<R>,
|
||||
{
|
||||
let mut driver = (self.0).0.lock();
|
||||
|
||||
// ensure header page is mapped
|
||||
active_table().map_if_not_exists(driver.header as usize, driver.header as usize);
|
||||
|
||||
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
|
||||
let payload_target = unsafe { slice::from_raw_parts_mut((driver.queue_page[VIRTIO_QUEUE_TRANSMIT] + size_of::<VirtIONetHeader>()) as *mut u8, len)};
|
||||
let result = f(payload_target);
|
||||
let mut net_header = unsafe { &mut *(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIONetHeader) };
|
||||
|
||||
let mut header = unsafe { &mut *(driver.header as *mut VirtIOHeader) };
|
||||
let mut ring = unsafe {
|
||||
&mut *((driver.queue_address[VIRTIO_QUEUE_TRANSMIT] + size_of::<VirtIOVirtqueueDesc>() * driver.queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
|
||||
};
|
||||
|
||||
// re-add buffer to desc
|
||||
let mut desc = unsafe { &mut *(driver.queue_address[VIRTIO_QUEUE_TRANSMIT] as *mut VirtIOVirtqueueDesc) };
|
||||
desc.addr.write(driver.queue_page[VIRTIO_QUEUE_TRANSMIT] as u64);
|
||||
desc.len.write((len + size_of::<VirtIONetHeader>()) as u32);
|
||||
|
||||
// memory barrier
|
||||
crate::arch::cpu::fence();
|
||||
|
||||
// add desc to available ring
|
||||
ring.idx.write(ring.idx.read() + 1);
|
||||
header.queue_notify.write(VIRTIO_QUEUE_TRANSMIT as u32);
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bitflags! {
    /// virtio-net feature bits offered by the device / accepted by the driver.
    /// Names mirror the VIRTIO_NET_F_* constants — presumably taken from the
    /// virtio spec's network-device feature list; verify bit positions there.
    struct VirtIONetFeature : u64 {
        const CSUM = 1 << 0;
        const GUEST_CSUM = 1 << 1;
        const CTRL_GUEST_OFFLOADS = 1 << 2;
        const MTU = 1 << 3;
        const MAC = 1 << 5;
        const GSO = 1 << 6;
        const GUEST_TSO4 = 1 << 7;
        const GUEST_TSO6 = 1 << 8;
        const GUEST_ECN = 1 << 9;
        const GUEST_UFO = 1 << 10;
        const HOST_TSO4 = 1 << 11;
        const HOST_TSO6 = 1 << 12;
        const HOST_ECN = 1 << 13;
        const HOST_UFO = 1 << 14;
        const MRG_RXBUF = 1 << 15;
        const STATUS = 1 << 16;
        const CTRL_VQ = 1 << 17;
        const CTRL_RX = 1 << 18;
        const CTRL_VLAN = 1 << 19;
        const CTRL_RX_EXTRA = 1 << 20;
        const GUEST_ANNOUNCE = 1 << 21;
        const MQ = 1 << 22;
        // NOTE(review): spec spells this CTRL_MAC_ADDR — consider renaming.
        const CTL_MAC_ADDR = 1 << 23;
        // device independent
        const RING_INDIRECT_DESC = 1 << 28;
        const RING_EVENT_IDX = 1 << 29;
        const VERSION_1 = 1 << 32; // legacy
    }
}
|
||||
|
||||
bitflags! {
    /// Link status bits read from the `status` field of the device's
    /// configuration space (valid when the STATUS feature is negotiated).
    struct VirtIONetworkStatus : u16 {
        const LINK_UP = 1;
        const ANNOUNCE = 2;
    }
}
|
||||
|
||||
bitflags! {
    /// Bits of the MMIO interrupt-status register: distinguishes "a used
    /// ring was updated" from "device configuration changed".
    struct VirtIONetworkInterruptStatus : u32 {
        const USED_RING_UPDATE = 1 << 0;
        const CONFIGURATION_CHANGE = 1 << 1;
    }
}
|
||||
|
||||
/// virtio-net device configuration space layout, mapped at a fixed offset
/// (0x100 in `virtio_net_init`) past the MMIO header.
#[repr(packed)]
#[derive(Debug)]
struct VirtIONetworkConfig {
    // MAC address assigned by the device (valid when MAC feature negotiated)
    mac: [u8; 6],
    // link status bits, see VirtIONetworkStatus
    status: ReadOnly<u16>
}
|
||||
|
||||
// virtio 5.1.6 Device Operation
/// Per-packet header that precedes every frame in both the receive and
/// transmit buffers; the Ethernet payload starts immediately after it.
#[repr(packed)]
#[derive(Debug)]
struct VirtIONetHeader {
    flags: Volatile<u8>,
    gso_type: Volatile<u8>,
    hdr_len: Volatile<u16>, // cannot rely on this
    gso_size: Volatile<u16>,
    csum_start: Volatile<u16>,
    csum_offset: Volatile<u16>,
    // payload starts from here
}
|
||||
|
||||
|
||||
/// Probe and initialize a virtio-net device described by device-tree `node`
/// (legacy MMIO interface): negotiate features, read the MAC address,
/// set up one receive and one transmit virtqueue (one descriptor each),
/// and register the driver in the global DRIVERS / NET_DRIVERS lists.
///
/// The sequence of MMIO writes below is order-sensitive; do not reorder.
pub fn virtio_net_init(node: &Node) {
    // MMIO base address comes from the device tree "reg" property.
    let reg = node.prop_raw("reg").unwrap();
    let from = reg.as_slice().read_be_u64(0).unwrap();
    let mut header = unsafe { &mut *(from as *mut VirtIOHeader) };

    // NOTE(review): status is set straight to DRIVER without the usual
    // reset/ACKNOWLEDGE steps — confirm against the virtio init sequence.
    header.status.write(VirtIODeviceStatus::DRIVER.bits());

    // Read the 64-bit device feature set in two 32-bit halves.
    let mut device_features_bits: u64;
    header.device_features_sel.write(0); // device features [0, 32)
    device_features_bits = header.device_features.read().into();
    header.device_features_sel.write(1); // device features [32, 64)
    device_features_bits = device_features_bits + ((header.device_features.read() as u64) << 32);
    let device_features = VirtIONetFeature::from_bits_truncate(device_features_bits);
    debug!("Device features {:?}", device_features);

    // negotiate these flags only
    let supported_features = VirtIONetFeature::MAC | VirtIONetFeature::STATUS;
    let driver_features = (device_features & supported_features).bits();
    // Write our accepted features back, again in two halves.
    header.driver_features_sel.write(0); // driver features [0, 32)
    header.driver_features.write((driver_features & 0xFFFFFFFF) as u32);
    header.driver_features_sel.write(1); // driver features [32, 64)
    header.driver_features.write(((driver_features & 0xFFFFFFFF00000000) >> 32) as u32);

    // read configuration space (MAC + link status) at MMIO offset 0x100
    let mut mac: [u8; 6];
    let mut status: VirtIONetworkStatus;
    let mut config = unsafe { &mut *((from + 0x100) as *mut VirtIONetworkConfig) };
    mac = config.mac;
    status = VirtIONetworkStatus::from_bits_truncate(config.status.read());
    debug!("Got MAC address {:?} and status {:?}", mac, status);

    // virtio 4.2.4 Legacy interface
    // configure two virtqueues: ingress and egress
    header.guest_page_size.write(PAGE_SIZE as u32); // one page

    let queue_num = 1; // for simplicity
    let mut driver = VirtIONet {
        interrupt: node.prop_u32("interrupts").unwrap(),
        interrupt_parent: node.prop_u32("interrupt-parent").unwrap(),
        header: from as usize,
        mac: EthernetAddress(mac),
        queue_num: queue_num,
        queue_address: [0, 0],
        queue_page: [0, 0],
        last_used_idx: [0, 0],
    };

    // 0 for receive, 1 for transmit
    for queue in 0..2 {
        header.queue_sel.write(queue as u32);
        assert_eq!(header.queue_pfn.read(), 0); // not in use
        let queue_num_max = header.queue_num_max.read();
        assert!(queue_num_max >= queue_num); // queue available
        let size = virtqueue_size(queue_num as usize, PAGE_SIZE);
        assert!(size % PAGE_SIZE == 0);
        // alloc continuous pages for the virtqueue structures
        let address = unsafe {
            HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(size, PAGE_SIZE).unwrap())
        } as usize;
        driver.queue_address[queue] = address;
        debug!("queue {} using page address {:#X} with size {}", queue, address as usize, size);

        header.queue_num.write(queue_num);
        header.queue_align.write(PAGE_SIZE as u32);
        // Legacy interface takes the queue's page frame number (addr >> 12).
        header.queue_pfn.write((address as u32) >> 12);

        // allocate a page for the packet buffer itself
        let page = unsafe {
            HEAP_ALLOCATOR.alloc_zeroed(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
        } as usize;
        driver.queue_page[queue] = page;

        // fill first (and only) desc
        let mut desc = unsafe { &mut *(address as *mut VirtIOVirtqueueDesc) };
        desc.addr.write(page as u64);
        desc.len.write(PAGE_SIZE as u32);
        if queue == VIRTIO_QUEUE_RECEIVE {
            // device writable
            desc.flags.write(VirtIOVirtqueueFlag::WRITE.bits());
        } else if queue == VIRTIO_QUEUE_TRANSMIT {
            // driver readable
            desc.flags.write(0);
        }
        // memory barrier: descriptor must be visible before the ring update
        crate::arch::cpu::fence();

        if queue == VIRTIO_QUEUE_RECEIVE {
            // add the desc to the available ring so the device can fill it
            let mut ring = unsafe {
                &mut *((address + size_of::<VirtIOVirtqueueDesc>() * queue_num as usize) as *mut VirtIOVirtqueueAvailableRing)
            };
            ring.ring[0].write(0);
            // wait for first packet
            ring.idx.write(ring.idx.read() + 1);
        }

        // notify device about the new buffer
        header.queue_notify.write(queue as u32);
        debug!("queue {} using page address {:#X}", queue, page);
    }

    // Device is live from this point on.
    header.status.write(VirtIODeviceStatus::DRIVER_OK.bits());

    let mut net_driver = VirtIONetDriver(Arc::new(Mutex::new(driver)));

    // Register with both the generic driver list and the network driver list.
    DRIVERS.lock().push(Box::new(net_driver.clone()));
    NET_DRIVERS.lock().push(Box::new(net_driver));
}
|
@ -0,0 +1,2 @@
|
||||
mod test;
|
||||
pub use self::test::server;
|
@ -0,0 +1,91 @@
|
||||
use crate::thread;
|
||||
use crate::drivers::NET_DRIVERS;
|
||||
use smoltcp::wire::*;
|
||||
use smoltcp::iface::*;
|
||||
use smoltcp::socket::*;
|
||||
use alloc::collections::BTreeMap;
|
||||
use crate::drivers::NetDriver;
|
||||
use crate::drivers::net::virtio_net::VirtIONetDriver;
|
||||
use alloc::vec;
|
||||
use smoltcp::time::Instant;
|
||||
use core::fmt::Write;
|
||||
|
||||
pub extern fn server(_arg: usize) -> ! {
|
||||
if NET_DRIVERS.lock().len() < 1 {
|
||||
loop {
|
||||
thread::yield_now();
|
||||
}
|
||||
}
|
||||
|
||||
let mut driver = {
|
||||
let ref_driver = &mut *NET_DRIVERS.lock()[0];
|
||||
ref_driver.as_any().downcast_ref::<VirtIONetDriver>().unwrap().clone()
|
||||
};
|
||||
let ethernet_addr = driver.get_mac();
|
||||
let ip_addrs = [IpCidr::new(IpAddress::v4(10,0,0,2), 24)];
|
||||
let neighbor_cache = NeighborCache::new(BTreeMap::new());
|
||||
let mut iface = EthernetInterfaceBuilder::new(driver.clone())
|
||||
.ethernet_addr(ethernet_addr)
|
||||
.ip_addrs(ip_addrs)
|
||||
.neighbor_cache(neighbor_cache)
|
||||
.finalize();
|
||||
|
||||
let udp_rx_buffer = UdpSocketBuffer::new(vec![UdpPacketMetadata::EMPTY], vec![0; 64]);
|
||||
let udp_tx_buffer = UdpSocketBuffer::new(vec![UdpPacketMetadata::EMPTY], vec![0; 128]);
|
||||
let udp_socket = UdpSocket::new(udp_rx_buffer, udp_tx_buffer);
|
||||
|
||||
let tcp_rx_buffer = TcpSocketBuffer::new(vec![0; 1024]);
|
||||
let tcp_tx_buffer = TcpSocketBuffer::new(vec![0; 1024]);
|
||||
let tcp_socket = TcpSocket::new(tcp_rx_buffer, tcp_tx_buffer);
|
||||
|
||||
let mut sockets = SocketSet::new(vec![]);
|
||||
let udp_handle = sockets.add(udp_socket);
|
||||
let tcp_handle = sockets.add(tcp_socket);
|
||||
|
||||
loop {
|
||||
{
|
||||
let timestamp = Instant::from_millis(unsafe { crate::trap::TICK as i64 });
|
||||
match iface.poll(&mut sockets, timestamp) {
|
||||
Ok(_) => {},
|
||||
Err(e) => {
|
||||
println!("poll error: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// udp server
|
||||
{
|
||||
let mut socket = sockets.get::<UdpSocket>(udp_handle);
|
||||
if !socket.is_open() {
|
||||
socket.bind(6969).unwrap();
|
||||
}
|
||||
|
||||
let client = match socket.recv() {
|
||||
Ok((data, endpoint)) => {
|
||||
Some(endpoint)
|
||||
}
|
||||
Err(_) => None
|
||||
};
|
||||
if let Some(endpoint) = client {
|
||||
let hello = b"hello\n";
|
||||
socket.send_slice(hello, endpoint).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
// simple http server
|
||||
{
|
||||
let mut socket = sockets.get::<TcpSocket>(tcp_handle);
|
||||
if !socket.is_open() {
|
||||
socket.listen(80).unwrap();
|
||||
}
|
||||
|
||||
if socket.can_send() {
|
||||
write!(socket, "HTTP/1.1 200 OK\r\nServer: rCore\r\nContent-Length: 13\r\nContent-Type: text/html\r\nConnection: Closed\r\n\r\nHello, world!\r\n").unwrap();
|
||||
socket.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
thread::yield_now();
|
||||
}
|
||||
|
||||
}
|
Loading…
Reference in new issue