Extensive refactor of IO, PCI, Virtio

Bruce Leidl 2023-08-02 13:49:49 -04:00
parent 97d9ef95c5
commit 8f64743a0e
50 changed files with 3000 additions and 2164 deletions

src/devices/irq_event.rs (new file, 45 lines)

@@ -0,0 +1,45 @@
use std::{io, result};
use vmm_sys_util::eventfd::EventFd;
use crate::vm::KvmVm;
pub struct IrqLevelEvent {
trigger_event: EventFd,
resample_event: EventFd,
}
type Result<T> = result::Result<T, io::Error>;
impl IrqLevelEvent {
pub fn register(kvm_vm: &KvmVm, irq: u8) -> Result<Self> {
let ev = Self::new()?;
kvm_vm.vm_fd()
.register_irqfd_with_resample(&ev.trigger_event, &ev.resample_event, irq as u32)?;
Ok(ev)
}
pub fn new() -> Result<Self> {
let trigger_event = EventFd::new(0)?;
let resample_event = EventFd::new(0)?;
Ok(IrqLevelEvent {
trigger_event, resample_event,
})
}
pub fn try_clone(&self) -> Result<IrqLevelEvent> {
let trigger_event = self.trigger_event.try_clone()?;
let resample_event = self.resample_event.try_clone()?;
Ok(IrqLevelEvent {
trigger_event,
resample_event,
})
}
pub fn trigger(&self) -> Result<()> {
self.trigger_event.write(1)
}
pub fn wait_resample(&self) -> Result<()> {
let _ = self.resample_event.read()?;
Ok(())
}
}
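
Editor's sketch (not part of the commit): typical device-side use of a level-triggered interrupt, assuming `kvm_vm` and the IRQ line come from machine setup elsewhere in the crate.

// Register the trigger/resample eventfd pair with KVM, then assert the line.
fn pulse_level_irq(kvm_vm: &KvmVm) -> Result<()> {
    let ev = IrqLevelEvent::register(kvm_vm, 5)?; // hypothetical IRQ 5
    ev.trigger()?;        // assert the interrupt line
    ev.wait_resample()?;  // KVM signals resample on guest EOI; re-trigger if still pending
    Ok(())
}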

src/devices/rtc.rs

@@ -1,8 +1,7 @@
-use std::sync::{Arc,RwLock};
 use std::mem;
 use libc;
-use crate::vm::io::{IoDispatcher,IoPortOps};
+use crate::io::bus::BusDevice;
+use crate::io::ReadableInt;

 const RTC_SECONDS: u8 = 0x00;
 const RTC_MINUTES: u8 = 0x02;
@@ -21,31 +20,30 @@ pub struct Rtc {
     data: [u8; 128]
 }

-impl IoPortOps for Rtc {
-    fn io_in(&mut self, port: u16, _size: usize) -> u32 {
-        if port == 0x0071 {
-            self.data_in() as u32
-        } else {
-            0
-        }
-    }
-    fn io_out(&mut self, port: u16, _size: usize, val: u32) {
-        if port == 0x0070 {
-            self.index_out(val as u8);
-        } else if port == 0x0071 {
-            self.data_out(val as u8)
-        }
-    }
-}
+impl BusDevice for Rtc {
+    fn read(&mut self, offset: u64, data: &mut [u8]) {
+        if offset == 1 && data.len() == 1 {
+            ReadableInt::new_byte(self.data_in())
+                .read(data);
+        } else {
+            data.fill(0);
+        }
+    }
+    fn write(&mut self, offset: u64, data: &[u8]) {
+        if data.len() == 1 {
+            match offset {
+                0 => self.index_out(data[0]),
+                1 => self.data_out(data[0]),
+                _ => {},
+            }
+        }
+    }
+}

 impl Rtc {
-    pub fn register(io: Arc<IoDispatcher>) {
-        let rtc = Arc::new(RwLock::new(Rtc::new()));
-        io.register_ioports(0x0070, 2, rtc);
-    }
-    fn new() -> Rtc {
+    pub fn new() -> Rtc {
         Rtc {
             idx:0,
             data: [0; 128]

src/devices/serial.rs

@@ -1,7 +1,6 @@
-use std::sync::{Arc, RwLock};
 use std::io::{self, Write};
+use crate::io::bus::BusDevice;
-use crate::vm::io::{IoPortOps,IoDispatcher};
 use crate::vm::KvmVm;

 const UART_TX: u16 = 0;
@@ -43,7 +42,29 @@ const UART_SCR: u16 = 7;
 const FIFO_LEN: usize = 64;

+pub enum SerialPort {
+    COM1,
+    COM2,
+    COM3,
+    COM4,
+}
+
+impl SerialPort {
+    pub fn io_port(&self) -> u16 {
+        match self {
+            SerialPort::COM1 => 0x3f8,
+            SerialPort::COM2 => 0x2f8,
+            SerialPort::COM3 => 0x3e8,
+            SerialPort::COM4 => 0x2e8,
+        }
+    }
+
+    pub fn irq(&self) -> u8 {
+        match self {
+            // conventional PC assignment: COM1/COM3 share IRQ 4, COM2/COM4 share IRQ 3
+            SerialPort::COM1|SerialPort::COM3 => 4,
+            SerialPort::COM2|SerialPort::COM4 => 3,
+        }
+    }
+}
+
 trait Bits {
     fn set(&mut self, flag: Self);
@@ -66,7 +87,6 @@ impl Bits for u8 {
 }

 pub struct SerialDevice {
-    iobase: u16,
     kvm_vm: KvmVm,
     irq: u8,
     irq_state: u8,
@@ -87,15 +107,17 @@ pub struct SerialDevice {
     scr: u8,
 }

-impl IoPortOps for SerialDevice {
-    fn io_in(&mut self, port: u16, _size: usize) -> u32 {
-        let off = port - self.iobase;
-        self.serial_in(off) as u32
-    }
-    fn io_out(&mut self, port: u16, _size: usize, val: u32) {
-        let off = port - self.iobase;
-        self.serial_out(off, val as u8);
-    }
-}
+impl BusDevice for SerialDevice {
+    fn read(&mut self, offset: u64, data: &mut [u8]) {
+        if data.len() == 1 {
+            data[0] = self.serial_in(offset as u16);
+        }
+    }
+    fn write(&mut self, offset: u64, data: &[u8]) {
+        if data.len() == 1 {
+            self.serial_out(offset as u16, data[0])
+        }
+    }
+}
@@ -270,6 +292,7 @@ impl SerialDevice {
         }
     }

+/*
     pub fn register(kvm_vm: KvmVm, io: Arc<IoDispatcher>, id: u8) {
         if let Some((base,irq)) = SerialDevice::base_irq_for_id(id) {
             let dev = SerialDevice::new(kvm_vm, base, irq);
@@ -287,9 +310,11 @@ impl SerialDevice {
         }
     }
-    fn new(kvm_vm: KvmVm, iobase: u16, irq: u8) -> SerialDevice {
+*/
+    pub fn new(kvm_vm: KvmVm, irq: u8) -> SerialDevice {
         SerialDevice {
-            iobase,
+            // iobase,
            kvm_vm,
            irq,
            irq_state: 0,

src/devices/virtio_9p/mod.rs

@@ -1,10 +1,8 @@
-use std::sync::{Arc,RwLock};
 use std::thread;
 use std::path::{PathBuf, Path};
-use crate::memory::{GuestRam, MemoryManager};
+use crate::memory::GuestRam;
-use crate::virtio::{self,VirtioBus,VirtioDeviceOps, VirtQueue, Result};
 use crate::devices::virtio_9p::server::Server;
 use crate::devices::virtio_9p::filesystem::{FileSystem, FileSystemOps};
 use self::pdu::PduParser;
@@ -16,16 +14,15 @@ mod filesystem;
 mod server;
 mod synthetic;

-const VIRTIO_ID_9P: u16 = 9;
 const VIRTIO_9P_MOUNT_TAG: u64 = 0x1;

 pub use synthetic::SyntheticFS;
+use crate::io::{FeatureBits, Queues, VirtioDevice, VirtioDeviceType, VirtQueue};

 pub struct VirtioP9<T: FileSystemOps> {
     filesystem: T,
     root_dir: PathBuf,
-    feature_bits: u64,
+    features: FeatureBits,
     debug: bool,
     config: Vec<u8>,
 }
@@ -41,52 +38,54 @@ impl <T: FileSystemOps+'static> VirtioP9<T> {
         config
     }

-    fn new(filesystem: T, tag_name: &str, root_dir: &str, debug: bool) -> Arc<RwLock<Self>> {
-        Arc::new(RwLock::new(VirtioP9 {
+    pub fn new(filesystem: T, tag_name: &str, root_dir: &str, debug: bool) -> Self {
+        VirtioP9 {
             filesystem,
             root_dir: PathBuf::from(root_dir),
-            feature_bits: 0,
+            features: FeatureBits::new_default(VIRTIO_9P_MOUNT_TAG),
             debug,
             config: VirtioP9::<T>::create_config(tag_name),
-        }))
-    }
-
-    pub fn create_with_filesystem(filesystem: T, vbus: &mut VirtioBus, tag_name: &str, root_dir: &str, debug: bool) -> Result<()> {
-        vbus.new_virtio_device(VIRTIO_ID_9P, VirtioP9::new(filesystem, tag_name, root_dir, debug))
-            .set_num_queues(1)
-            .set_features(VIRTIO_9P_MOUNT_TAG)
-            .set_config_size(tag_name.len() + 3)
-            .register()
+        }
     }
 }

 impl VirtioP9<FileSystem> {
-    pub fn create(vbus: &mut VirtioBus, tag_name: &str, root_dir: &str, read_only: bool, debug: bool) -> Result<()> {
+    pub fn new_filesystem(tag_name: &str, root_dir: &str, read_only: bool, debug: bool) -> Self {
         let filesystem = FileSystem::new(PathBuf::from(root_dir), read_only);
-        Self::create_with_filesystem(filesystem, vbus, tag_name, root_dir, debug)
+        Self::new(filesystem, tag_name, root_dir, debug)
     }
 }

-impl <T: FileSystemOps+'static> VirtioDeviceOps for VirtioP9<T> {
-    fn reset(&mut self) {
-        println!("Reset called");
-    }
-    fn enable_features(&mut self, bits: u64) -> bool {
-        self.feature_bits = bits;
-        true
-    }
-    fn read_config(&mut self, offset: usize, size: usize) -> u64 {
-        virtio::read_config_buffer(&self.config, offset, size)
-    }
-    fn start(&mut self, memory: &MemoryManager, mut queues: Vec<VirtQueue>) {
-        let vq = queues.pop().unwrap();
+impl <T: FileSystemOps+'static> VirtioDevice for VirtioP9<T> {
+    fn features(&self) -> &FeatureBits {
+        &self.features
+    }
+    fn queue_sizes(&self) -> &[u16] {
+        &[VirtQueue::DEFAULT_QUEUE_SIZE]
+    }
+    fn device_type(&self) -> VirtioDeviceType {
+        VirtioDeviceType::NineP
+    }
+    fn config_size(&self) -> usize {
+        self.config.len()
+    }
+    fn read_config(&self, offset: u64, data: &mut [u8]) {
+        let offset = offset as usize;
+        if offset + data.len() <= self.config.len() {
+            data.copy_from_slice(&self.config[offset..offset+data.len()])
+        }
+    }
+    fn start(&mut self, queues: &Queues) {
+        let vq = queues.get_queue(0);
         let root_dir = self.root_dir.clone();
         let filesystem = self.filesystem.clone();
-        let ram = memory.guest_ram().clone();
+        let ram = queues.memory().guest_ram().clone();
         let debug = self.debug;
         thread::spawn(move || run_device(ram, vq, &root_dir, filesystem, debug));
     }

src/devices/virtio_9p/pdu.rs

@@ -6,8 +6,8 @@ use libc;
 use byteorder::{LittleEndian,ReadBytesExt,WriteBytesExt};
 use crate::devices::virtio_9p::file::Qid;
+use crate::io::Chain;
 use crate::memory::GuestRam;
-use crate::virtio::Chain;

 const P9_HEADER_LEN: usize = 7;
 const P9_RLERROR: u8 = 7;

@@ -1,13 +1,12 @@
 use std::io::Write;
-use std::sync::{RwLock, Arc};
 use std::{result, io, thread};
-use crate::{disk, virtio};
+use crate::disk;
-use crate::virtio::{VirtioBus, VirtioDeviceOps, VirtQueue, DeviceConfigArea, Chain};
-use crate::memory::MemoryManager;
 use crate::disk::DiskImage;
 use thiserror::Error;
+use crate::io::{Chain, FeatureBits, Queues, VirtioDevice, VirtioDeviceType, VirtioError, VirtQueue};
+use crate::io::virtio::DeviceConfigArea;

 const VIRTIO_BLK_F_RO: u64 = 1 << 5;
 const VIRTIO_BLK_F_BLK_SIZE: u64 = 1 << 6;
@@ -39,7 +38,7 @@ enum Error {
     #[error("error flushing disk image: {0}")]
     DiskFlush(disk::Error),
     #[error("error waiting on virtqueue: {0}")]
-    VirtQueueWait(virtio::Error),
+    VirtQueueWait(VirtioError),
     #[error("virtqueue read descriptor size ({0}) is invalid. Not a multiple of sector size")]
     InvalidReadDescriptor(usize),
 }
@@ -49,66 +48,66 @@ type Result<T> = result::Result<T, Error>;
 pub struct VirtioBlock<D: DiskImage+'static> {
     disk_image: Option<D>,
     config: DeviceConfigArea,
-    enabled_features: u64,
+    features: FeatureBits,
 }

 const HEADER_SIZE: usize = 16;

-const VIRTIO_ID_BLOCK: u16 = 2;
-
 const CAPACITY_OFFSET: usize = 0;
 const SEG_MAX_OFFSET: usize = 12;
 const BLK_SIZE_OFFSET: usize = 20;
 const CONFIG_SIZE: usize = 24;

 impl <D: DiskImage + 'static> VirtioBlock<D> {
-    fn new(disk_image: D) -> Self {
+    pub fn new(disk_image: D) -> Self {
         let mut config = DeviceConfigArea::new(CONFIG_SIZE);
         config.write_u64(CAPACITY_OFFSET, disk_image.sector_count());
         config.write_u32(SEG_MAX_OFFSET, QUEUE_SIZE as u32 - 2);
         config.write_u32(BLK_SIZE_OFFSET, 1024);
+        let features = FeatureBits::new_default( VIRTIO_BLK_F_FLUSH |
+            VIRTIO_BLK_F_BLK_SIZE |
+            VIRTIO_BLK_F_SEG_MAX |
+            if disk_image.read_only() {
+                VIRTIO_BLK_F_RO
+            } else {
+                0
+            }
+        );
         VirtioBlock {
             disk_image: Some(disk_image),
             config,
-            enabled_features: 0,
+            features,
         }
     }
-
-    pub fn create(vbus: &mut VirtioBus, disk_image: D) -> virtio::Result<()> {
-        let feature_bits = VIRTIO_BLK_F_FLUSH |
-            VIRTIO_BLK_F_BLK_SIZE |
-            VIRTIO_BLK_F_SEG_MAX |
-            if disk_image.read_only() {
-                VIRTIO_BLK_F_RO
-            } else {
-                0
-            };
-
-        let dev = Arc::new(RwLock::new(VirtioBlock::new(disk_image)));
-        vbus.new_virtio_device(VIRTIO_ID_BLOCK, dev)
-            .set_queue_sizes(&[QUEUE_SIZE])
-            .set_config_size(CONFIG_SIZE)
-            .set_features(feature_bits)
-            .register()
-    }
 }

-impl <D: DiskImage> VirtioDeviceOps for VirtioBlock<D> {
-    fn enable_features(&mut self, bits: u64) -> bool {
-        self.enabled_features = bits;
-        true
-    }
-    fn write_config(&mut self, offset: usize, size: usize, val: u64) {
-        self.config.write_config(offset, size, val);
-    }
-    fn read_config(&mut self, offset: usize, size: usize) -> u64 {
-        self.config.read_config(offset, size)
-    }
-    fn start(&mut self, _: &MemoryManager, mut queues: Vec<VirtQueue>) {
-        let vq = queues.pop().unwrap();
+impl <D: DiskImage> VirtioDevice for VirtioBlock<D> {
+    fn features(&self) -> &FeatureBits {
+        &self.features
+    }
+    fn queue_sizes(&self) -> &[u16] {
+        &[QUEUE_SIZE as u16]
+    }
+    fn device_type(&self) -> VirtioDeviceType {
+        VirtioDeviceType::Block
+    }
+    fn config_size(&self) -> usize {
+        CONFIG_SIZE
+    }
+    fn read_config(&self, offset: u64, data: &mut [u8]) {
+        self.config.read_config(offset, data);
+    }
+    fn write_config(&mut self, offset: u64, data: &[u8]) {
+        self.config.write_config(offset, data);
+    }
+    fn start(&mut self, queues: &Queues) {
+        let vq = queues.get_queue(0);
         let mut disk = self.disk_image.take().expect("No disk image?");
         if let Err(err) = disk.open() {

@@ -1,7 +1,4 @@
-use crate::virtio::{VirtioDeviceOps, VirtQueue, VirtioBus, Chain};
-use crate::memory::MemoryManager;
-use crate::{system, virtio};
-use std::sync::{RwLock, Arc};
+use crate::system;
 use std::{result, thread, io};
 use crate::system::{EPoll,Event};
 use std::io::{Read, Write};
@@ -9,8 +6,8 @@ use std::os::unix::io::AsRawFd;
 use crate::system::Tap;
 use thiserror::Error;
+use crate::io::{Chain, FeatureBits, Queues, VirtioDevice, VirtioDeviceType, VirtQueue};

-const VIRTIO_ID_NET: u16 = 1;
 const MAC_ADDR_LEN: usize = 6;

 #[derive(Debug,Error)]
@@ -46,23 +43,16 @@ const VIRTIO_NET_F_HOST_ECN: u64 = 1 << 13;
 const VIRTIO_NET_HDR_SIZE: i32 = 12;

 pub struct VirtioNet {
-    _features_supported: u64,
+    features: FeatureBits,
     tap: Option<Tap>,
 }

 impl VirtioNet {
-    fn new(tap: Tap, features_supported: u64) -> Self {
-        VirtioNet{
-            _features_supported: features_supported,
-            tap: Some(tap)
-        }
-    }
-
-    pub fn create(vbus: &mut VirtioBus, tap: Tap) -> virtio::Result<()> {
+    pub fn new(tap: Tap) -> Self {
         tap.set_offload(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6| TUN_F_TSO_ECN).unwrap();
         tap.set_vnet_hdr_size(VIRTIO_NET_HDR_SIZE).unwrap();

         let feature_bits =
             VIRTIO_NET_F_CSUM |
             VIRTIO_NET_F_GUEST_CSUM |
             VIRTIO_NET_F_GUEST_TSO4 |
             VIRTIO_NET_F_GUEST_TSO6 |
@@ -70,25 +60,44 @@ impl VirtioNet {
             VIRTIO_NET_F_HOST_TSO4 |
             VIRTIO_NET_F_HOST_TSO6 |
             VIRTIO_NET_F_HOST_ECN;
-        let dev = Arc::new(RwLock::new(VirtioNet::new(tap, feature_bits)));
-        vbus.new_virtio_device(VIRTIO_ID_NET, dev)
-            .set_queue_sizes(&[256, 256])
-            .set_config_size(MAC_ADDR_LEN)
-            .set_features(feature_bits)
-            .register()
+        let features = FeatureBits::new_default(feature_bits);
+        VirtioNet{
+            features,
+            tap: Some(tap)
+        }
     }
 }

-pub const TUN_F_CSUM: u32 = 1;
-pub const TUN_F_TSO4: u32 = 2;
-pub const TUN_F_TSO6: u32 = 4;
-pub const TUN_F_TSO_ECN: u32 = 8;
-
-impl VirtioDeviceOps for VirtioNet {
-    fn start(&mut self, _memory: &MemoryManager, mut queues: Vec<VirtQueue>) {
-        let tx = queues.pop().unwrap();
-        let rx = queues.pop().unwrap();
+impl VirtioDevice for VirtioNet {
+    fn features(&self) -> &FeatureBits {
+        &self.features
+    }
+    fn queue_sizes(&self) -> &[u16] {
+        &[256, 256]
+    }
+    fn device_type(&self) -> VirtioDeviceType {
+        VirtioDeviceType::Net
+    }
+    fn config_size(&self) -> usize {
+        MAC_ADDR_LEN
+    }
+    fn read_config(&self, offset: u64, data: &mut [u8]) {
+        let (_,_) = (offset, data);
+    }
+    fn write_config(&mut self, offset: u64, data: &[u8]) {
+        let (_,_) = (offset, data);
+    }
+    fn start(&mut self, queues: &Queues) {
+        let rx = queues.get_queue(0);
+        let tx = queues.get_queue(1);
         let tap = self.tap.take().unwrap();
         let poll = match EPoll::new() {
             Ok(poll) => poll,
@@ -105,6 +114,11 @@ impl VirtioDeviceOps for VirtioNet {
         });
     }
 }
+
+pub const TUN_F_CSUM: u32 = 1;
+pub const TUN_F_TSO4: u32 = 2;
+pub const TUN_F_TSO6: u32 = 4;
+pub const TUN_F_TSO_ECN: u32 = 8;

 const MAX_BUFFER_SIZE: usize = 65562;
 const RX_VQ_TOKEN:u64 = 1;

@@ -1,33 +1,17 @@
-use std::sync::{Arc,RwLock};
 use std::thread;
 use std::fs::File;
+use crate::io::{FeatureBits, Queues, VirtioDevice, VirtioDeviceType, VirtQueue};

-use crate::virtio::{VirtioDeviceOps,VirtioBus,VirtQueue,Result};
-use crate::memory::MemoryManager;
-
-const VIRTIO_ID_RANDOM: u16 = 4;
-
-pub struct VirtioRandom;
+pub struct VirtioRandom {
+    features: FeatureBits,
+}

 impl VirtioRandom {
-    fn new() -> VirtioRandom { VirtioRandom }
-
-    pub fn create(vbus: &mut VirtioBus) -> Result<()> {
-        let dev = Arc::new(RwLock::new(VirtioRandom::new()));
-        vbus.new_virtio_device(VIRTIO_ID_RANDOM, dev)
-            .set_num_queues(1)
-            .register()
-    }
-}
-
-impl VirtioDeviceOps for VirtioRandom {
-    fn start(&mut self, _memory: &MemoryManager, mut queues: Vec<VirtQueue>) {
-        thread::spawn(move|| {
-            run(queues.pop().unwrap())
-        });
+    pub fn new() -> VirtioRandom {
+        VirtioRandom {
+            features: FeatureBits::new_default(0),
+        }
     }
 }
@@ -42,3 +26,24 @@ fn run(q: VirtQueue) {
         });
     }
 }
+
+impl VirtioDevice for VirtioRandom {
+    fn features(&self) -> &FeatureBits {
+        &self.features
+    }
+
+    fn queue_sizes(&self) -> &[u16] {
+        &[VirtQueue::DEFAULT_QUEUE_SIZE]
+    }
+
+    fn device_type(&self) -> VirtioDeviceType {
+        VirtioDeviceType::Rng
+    }
+
+    fn start(&mut self, queues: &Queues) {
+        let vq = queues.get_queue(0);
+        thread::spawn(move|| {
+            run(vq)
+        });
+    }
+}

@@ -1,12 +1,8 @@
-use std::sync::{Arc,RwLock};
 use std::io::{self,Write,Read};
 use std::thread::spawn;
 use termios::*;
-use crate::virtio::{VirtioDeviceOps,VirtioBus, VirtQueue,Result};
-use crate::memory::MemoryManager;
-
-const VIRTIO_ID_CONSOLE: u16 = 3;
+use crate::io::{VirtioDevice, VirtioDeviceType, FeatureBits, VirtQueue, ReadableInt, Queues};

 const VIRTIO_CONSOLE_F_SIZE: u64 = 0x1;
 const VIRTIO_CONSOLE_F_MULTIPORT: u64 = 0x2;
@@ -21,25 +17,18 @@ const VIRTIO_CONSOLE_PORT_OPEN: u16 = 6;
 const _VIRTIO_CONSOLE_PORT_NAME: u16 = 7;

 pub struct VirtioSerial {
-    feature_bits: u64,
+    features: FeatureBits,
 }

 impl VirtioSerial {
-    fn new() -> VirtioSerial {
-        VirtioSerial{feature_bits:0}
-    }
-
-    pub fn create(vbus: &mut VirtioBus) -> Result<()> {
-        let dev = Arc::new(RwLock::new(VirtioSerial::new()));
-        vbus.new_virtio_device(VIRTIO_ID_CONSOLE, dev)
-            .set_num_queues(4)
-            .set_device_class(0x0700)
-            .set_config_size(12)
-            .set_features(VIRTIO_CONSOLE_F_MULTIPORT|VIRTIO_CONSOLE_F_SIZE)
-            .register()
-    }
-
-    fn start_console(&self, _memory: &MemoryManager, q: VirtQueue) {
+    pub fn new() -> VirtioSerial {
+        let features = FeatureBits::new_default(VIRTIO_CONSOLE_F_MULTIPORT|VIRTIO_CONSOLE_F_SIZE);
+        VirtioSerial{
+            features,
+        }
+    }
+
+    fn start_console(&self, q: VirtQueue) {
         spawn(move || {
             loop {
                 q.wait_ready().unwrap();
@@ -52,7 +41,7 @@ impl VirtioSerial {
     }

     fn multiport(&self) -> bool {
-        self.feature_bits & VIRTIO_CONSOLE_F_MULTIPORT != 0
+        self.features.has_guest_bit(VIRTIO_CONSOLE_F_MULTIPORT)
     }
 }
@@ -69,39 +58,50 @@ struct WinSz {
 const TIOCGWINSZ: u64 = 0x5413;

-impl VirtioDeviceOps for VirtioSerial {
-    fn reset(&mut self) {
-        println!("Reset called");
-    }
-    fn enable_features(&mut self, bits: u64) -> bool {
-        self.feature_bits = bits;
-        true
-    }
-    fn read_config(&mut self, offset: usize, _size: usize) -> u64 {
-        if offset == 4 {
-            return 1;
-        }
-        0
-    }
-    fn start(&mut self, memory: &MemoryManager, mut queues: Vec<VirtQueue>) {
-        let mut term = Terminal::create(queues.remove(0));
-        self.start_console(memory, queues.remove(0));
+impl VirtioDevice for VirtioSerial {
+    fn features(&self) -> &FeatureBits {
+        &self.features
+    }
+
+    fn queue_sizes(&self) -> &[u16] {
+        &[
+            VirtQueue::DEFAULT_QUEUE_SIZE,
+            VirtQueue::DEFAULT_QUEUE_SIZE,
+            VirtQueue::DEFAULT_QUEUE_SIZE,
+            VirtQueue::DEFAULT_QUEUE_SIZE,
+        ]
+    }
+
+    fn device_type(&self) -> VirtioDeviceType {
+        VirtioDeviceType::Console
+    }
+
+    fn config_size(&self) -> usize {
+        12
+    }
+
+    fn read_config(&self, offset: u64, data: &mut [u8]) {
+        if offset == 4 && data.len() == 4 {
+            ReadableInt::new_dword(1).read(data);
+        } else {
+            data.fill(0);
+        }
+    }
+
+    fn start(&mut self, queues: &Queues) {
+        let mut term = Terminal::create(queues.get_queue(0));
+        self.start_console(queues.get_queue(1));
         spawn( move || {
             term.read_loop();
         });
         if self.multiport() {
-            let mut control = Control::new(queues.remove(0), queues.remove(0));
+            let mut control = Control::new(queues.get_queue(2), queues.get_queue(3));
             spawn(move || {
                 control.run();
             });
         }
     }
 }

 struct Control {
View File

@ -1,16 +1,15 @@
use std::os::unix::io::{AsRawFd, RawFd}; use std::os::unix::io::{AsRawFd, RawFd};
use std::sync::{RwLock, Arc};
use std::thread; use std::thread;
use crate::{system, virtio}; use crate::system;
use crate::system::EPoll; use crate::system::EPoll;
use crate::memory::{MemoryManager, DrmDescriptor}; use crate::memory::{MemoryManager, DrmDescriptor};
use crate::virtio::{VirtQueue, VirtioBus, VirtioDeviceOps, Chain};
use crate::devices::virtio_wl::{vfd::VfdManager, consts::*, Error, Result, VfdObject}; use crate::devices::virtio_wl::{vfd::VfdManager, consts::*, Error, Result, VfdObject};
use crate::system::ioctl::ioctl_with_ref; use crate::system::ioctl::ioctl_with_ref;
use std::os::raw::{c_ulong, c_uint, c_ulonglong}; use std::os::raw::{c_ulong, c_uint, c_ulonglong};
use vmm_sys_util::eventfd::EventFd; use vmm_sys_util::eventfd::EventFd;
use crate::io::{Chain, FeatureBits, Queues, VirtioDevice, VirtioDeviceType, VirtQueue};
#[repr(C)] #[repr(C)]
struct dma_buf_sync { struct dma_buf_sync {
@ -20,25 +19,21 @@ const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
const DMA_BUF_IOCTL_SYNC: c_ulong = iow!(DMA_BUF_IOCTL_BASE, 0, ::std::mem::size_of::<dma_buf_sync>() as i32); const DMA_BUF_IOCTL_SYNC: c_ulong = iow!(DMA_BUF_IOCTL_BASE, 0, ::std::mem::size_of::<dma_buf_sync>() as i32);
pub struct VirtioWayland { pub struct VirtioWayland {
feature_bits: u64, features: FeatureBits,
enable_dmabuf: bool, enable_dmabuf: bool,
} }
impl VirtioWayland { impl VirtioWayland {
fn new(enable_dmabuf: bool) -> Self { pub fn new(enable_dmabuf: bool) -> Self {
VirtioWayland { feature_bits: 0, enable_dmabuf } let features = FeatureBits::new_default(VIRTIO_WL_F_TRANS_FLAGS as u64);
} VirtioWayland {
features,
pub fn create(vbus: &mut VirtioBus, dmabuf: bool) -> virtio::Result<()> { enable_dmabuf
let dev = Arc::new(RwLock::new(VirtioWayland::new(dmabuf))); }
vbus.new_virtio_device(VIRTIO_ID_WL, dev)
.set_num_queues(2)
.set_features(VIRTIO_WL_F_TRANS_FLAGS as u64)
.register()
} }
fn transition_flags(&self) -> bool { fn transition_flags(&self) -> bool {
self.feature_bits & VIRTIO_WL_F_TRANS_FLAGS as u64 != 0 self.features.has_guest_bit(VIRTIO_WL_F_TRANS_FLAGS as u64)
} }
fn create_device(memory: MemoryManager, in_vq: VirtQueue, out_vq: VirtQueue, transition: bool, enable_dmabuf: bool) -> Result<WaylandDevice> { fn create_device(memory: MemoryManager, in_vq: VirtQueue, out_vq: VirtQueue, transition: bool, enable_dmabuf: bool) -> Result<WaylandDevice> {
@ -48,21 +43,28 @@ impl VirtioWayland {
} }
} }
impl VirtioDeviceOps for VirtioWayland { impl VirtioDevice for VirtioWayland {
fn enable_features(&mut self, bits: u64) -> bool { fn features(&self) -> &FeatureBits {
self.feature_bits = bits; &self.features
true
} }
fn start(&mut self, memory: &MemoryManager, mut queues: Vec<VirtQueue>) { fn queue_sizes(&self) -> &[u16] {
&[VirtQueue::DEFAULT_QUEUE_SIZE, VirtQueue::DEFAULT_QUEUE_SIZE]
}
fn device_type(&self) -> VirtioDeviceType {
VirtioDeviceType::Wl
}
fn start(&mut self, queues: &Queues) {
thread::spawn({ thread::spawn({
let memory = memory.clone();
let transition = self.transition_flags(); let transition = self.transition_flags();
let enable_dmabuf = self.enable_dmabuf; let enable_dmabuf = self.enable_dmabuf;
let in_vq = queues.get_queue(0);
let out_vq = queues.get_queue(1);
let memory = queues.memory().clone();
move || { move || {
let out_vq = queues.pop().unwrap(); let mut dev = match Self::create_device(memory, in_vq, out_vq,transition, enable_dmabuf) {
let in_vq = queues.pop().unwrap();
let mut dev = match Self::create_device(memory.clone(), in_vq, out_vq,transition, enable_dmabuf) {
Err(e) => { Err(e) => {
warn!("Error creating virtio wayland device: {}", e); warn!("Error creating virtio wayland device: {}", e);
return; return;

src/devices/virtio_wl/mod.rs

@@ -16,7 +16,6 @@ mod device;
 mod consts {
     use std::mem;
-    pub const VIRTIO_ID_WL: u16 = 63;
     pub const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
     pub const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
     pub const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;

src/devices/virtio_wl/vfd.rs

@@ -6,11 +6,11 @@ use std::time::Duration;
 use crate::memory::{MemoryManager, DrmDescriptor};
 use crate::system::{FileDesc, FileFlags,EPoll,MemoryFd};
-use crate::virtio::{VirtQueue, Chain};
 use crate::devices::virtio_wl::{
     consts::*, Error, Result, shm::VfdSharedMemory, pipe::VfdPipe, socket::VfdSocket, VfdObject
 };
+use crate::io::{Chain, VirtQueue};

 pub struct VfdManager {
     wayland_path: PathBuf,

src/io/bus.rs (new file, 144 lines)

@@ -0,0 +1,144 @@
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::result;
use std::sync::{Arc, Mutex};
use thiserror::Error;
#[derive(Debug,Error)]
pub enum Error {
#[error("New device overlaps with an old device.")]
Overlap,
}
pub type Result<T> = result::Result<T, Error>;
pub trait BusDevice {
fn read(&mut self, offset: u64, data: &mut [u8]) {
let (_,_) = (offset, data);
}
fn write(&mut self, offset: u64, data: &[u8]) {
let (_,_) = (offset, data);
}
}
#[derive(Debug,Copy,Clone)]
struct BusRange(u64, u64);
impl Eq for BusRange {}
impl PartialEq for BusRange {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Ord for BusRange {
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
}
impl PartialOrd for BusRange {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
/// A device container for routing reads and writes over some address space.
///
/// This doesn't have any restrictions on what kind of device or address space this applies to. The
/// only restriction is that no two devices can overlap in this address space.
#[derive(Clone,Default)]
pub struct Bus {
devices: BTreeMap<BusRange, Arc<Mutex<dyn BusDevice + Send>>>,
}
impl Bus {
/// Constructs a bus with an empty address space.
pub fn new() -> Bus {
Bus {
devices: BTreeMap::new(),
}
}
fn first_before(&self, addr: u64) -> Option<(BusRange, &Arc<Mutex<dyn BusDevice+Send>>)> {
for (range, dev) in self.devices.iter().rev() {
if range.0 <= addr {
return Some((*range, dev))
}
}
None
}
/// Returns the device mapped at `addr`, along with the offset of `addr` into the device's range.
pub fn get_device(&self, addr: u64) -> Option<(u64, &Arc<Mutex<dyn BusDevice+Send>>)> {
if let Some((BusRange(start, len), dev)) = self.first_before(addr) {
let offset = addr - start;
if offset < len {
return Some((offset, dev))
}
}
None
}
/// Puts the given device at the given address space.
pub fn insert(&mut self, device: Arc<Mutex<dyn BusDevice+Send>>, base: u64, len: u64) -> Result<()> {
if len == 0 {
return Err(Error::Overlap);
}
// Reject all cases where the new device's base is within an old device's range.
if self.get_device(base).is_some() {
return Err(Error::Overlap);
}
// The above check will miss an overlap in which the new device's base address is before the
// range of another device. To catch that case, we search for a device with a range before
// the new device's range's end. If there is no existing device in that range that starts
// after the new device, then there will be no overlap.
if let Some((BusRange(start, _), _)) = self.first_before(base + len - 1) {
// Such a device only conflicts with the new device if it also starts after the new
// device because of our initial `get_device` check above.
if start >= base {
return Err(Error::Overlap);
}
}
if self.devices.insert(BusRange(base, len), device).is_some() {
return Err(Error::Overlap);
}
Ok(())
}
/// Reads data from the device that owns the range containing `addr` and puts it into `data`.
///
/// Returns true on success, otherwise `data` is untouched.
pub fn read(&self, addr: u64, data: &mut [u8]) -> bool {
if let Some((offset, dev)) = self.get_device(addr) {
// OK to unwrap as lock() failing is a serious error condition and should panic.
dev.lock()
.expect("Failed to acquire device lock")
.read(offset, data);
true
} else {
false
}
}
/// Writes `data` to the device that owns the range containing `addr`.
///
/// Returns true if a device accepted the write, otherwise false.
pub fn write(&self, addr: u64, data: &[u8]) -> bool {
if let Some((offset, dev)) = self.get_device(addr) {
// OK to unwrap as lock() failing is a serious error condition and should panic.
dev.lock()
.expect("Failed to acquire device lock")
.write(offset, data);
true
} else {
false
}
}
}
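
Editor's sketch (not part of the commit): registering a device on a Bus and routing an access through it. Dummy is hypothetical; Arc and Mutex are already imported in this module.

// A trivial device that answers reads with the device-relative offset.
struct Dummy;

impl BusDevice for Dummy {
    fn read(&mut self, offset: u64, data: &mut [u8]) {
        data.fill(offset as u8);
    }
}

fn demo() {
    let mut bus = Bus::new();
    bus.insert(Arc::new(Mutex::new(Dummy)), 0x1000, 0x10).unwrap();

    let mut byte = [0u8; 1];
    assert!(bus.read(0x1004, &mut byte)); // dispatched to Dummy with offset 4
    assert_eq!(byte[0], 4);
    assert!(!bus.read(0x2000, &mut byte)); // no device claims this address
}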

src/io/busdata.rs (new file, 131 lines)

@@ -0,0 +1,131 @@
use std::convert::TryInto;
use std::fmt::{Debug, Formatter};
/*
pub enum IoInt {
Byte(u8, [u8; 1]),
Word(u16, [u8; 2]),
DWord(u32, [u8; 4]),
QWord(u64, [u8; 8]),
Data(Vec<u8>),
}
impl IoInt {
pub fn new_byte(n: u8) -> Self {
Self::Byte(n, [n])
}
pub fn new_word(n: u16) -> Self {
Self::Word(n, n.to_le_bytes())
}
pub fn new_dword(n: u32) -> Self {
Self::DWord(n, n.to_le_bytes())
}
pub fn new_qword(n: u64) -> Self {
Self::QWord(n, n.to_le_bytes())
}
}
impl From<&[u8]> for IoInt {
fn from(bytes: &[u8]) -> Self {
match bytes.len() {
1 => Self::Byte(bytes[0], [bytes[0]]),
2 => {
let n = u16::from_le_bytes(bytes.try_into().unwrap());
Self::Word(n, n.to_le_bytes())
},
4 => {
let n = u32::from_le_bytes(bytes.try_into().unwrap());
Self::DWord(n, n.to_le_bytes())
},
8 => {
let n = u64::from_le_bytes(bytes.try_into().unwrap());
Self::QWord(n, n.to_le_bytes())
},
_ => Self::Data(bytes.to_vec()),
}
}
}
*/
pub enum WriteableInt {
Byte(u8),
Word(u16),
DWord(u32),
QWord(u64),
Data(Vec<u8>),
}
impl From<&[u8]> for WriteableInt {
fn from(bytes: &[u8]) -> Self {
match bytes.len() {
1 => Self::Byte(bytes[0]),
2 => Self::Word(u16::from_le_bytes(bytes.try_into().unwrap())),
4 => Self::DWord(u32::from_le_bytes(bytes.try_into().unwrap())),
8 => Self::QWord(u64::from_le_bytes(bytes.try_into().unwrap())),
_ => Self::Data(bytes.to_vec()),
}
}
}
pub enum ReadableInt {
Byte(u8, [u8; 1]),
Word(u16, [u8; 2]),
DWord(u32, [u8; 4]),
}
impl ReadableInt {
pub fn new_byte(n: u8) -> Self {
Self::Byte(n, [n])
}
pub fn new_word(n: u16) -> Self {
Self::Word(n, n.to_le_bytes())
}
pub fn new_dword(n: u32) -> Self {
Self::DWord(n, n.to_le_bytes())
}
fn as_bytes(&self) -> &[u8] {
match self {
ReadableInt::Byte(_, bs) => bs,
ReadableInt::Word(_, bs) => bs,
ReadableInt::DWord(_, bs) => bs,
}
}
pub fn read(&self, buffer: &mut [u8]) {
let bs = self.as_bytes();
if buffer.len() >= bs.len() {
buffer[..bs.len()].copy_from_slice(bs);
}
}
}
impl From<u8> for ReadableInt {
fn from(value: u8) -> Self {
ReadableInt::new_byte(value)
}
}
impl From<u16> for ReadableInt {
fn from(value: u16) -> Self {
ReadableInt::new_word(value)
}
}
impl From<u32> for ReadableInt {
fn from(value: u32) -> Self {
ReadableInt::new_dword(value)
}
}
impl Debug for ReadableInt {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
ReadableInt::Byte(n, _) => write!(f, "Byte({})", n),
ReadableInt::Word(n, _) => write!(f, "Word({})", n),
ReadableInt::DWord(n, _) => write!(f, "DWord({})", n),
}
}
}
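
Editor's sketch (not part of the commit): ReadableInt encodes a register value as little-endian bytes sized to the access, and WriteableInt classifies a guest write by its width.

fn demo() {
    let status = ReadableInt::new_word(0x01a5);
    let mut buf = [0u8; 2];
    status.read(&mut buf);
    assert_eq!(buf, [0xa5, 0x01]); // little-endian encoding

    // Decoding a 2-byte guest write yields the Word variant.
    let decoded = WriteableInt::from(&buf[..]);
    assert!(matches!(decoded, WriteableInt::Word(0x01a5)));
}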

src/io/manager.rs (new file, 192 lines)

@@ -0,0 +1,192 @@
use std::sync::{Arc, Mutex, MutexGuard};
use vm_allocator::{AddressAllocator, AllocPolicy, IdAllocator, RangeInclusive};
use vmm_sys_util::eventfd::EventFd;
use crate::devices::rtc::Rtc;
use crate::devices::serial::{SerialDevice, SerialPort};
use crate::io::bus::{Bus, BusDevice};
use crate::io::pci::{MmioHandler, PciBarAllocation, PciBus, PciDevice};
use crate::io::{PciIrq, virtio};
use crate::io::virtio::{VirtioDeviceState,VirtioDevice};
use crate::memory::{AddressRange, MemoryManager};
use crate::vm::arch;
#[derive(Clone)]
pub struct IoAllocator {
mmio_allocator: Arc<Mutex<AddressAllocator>>,
irq_allocator: Arc<Mutex<IdAllocator>>,
}
impl IoAllocator {
fn new() -> Self {
let mmio_allocator = AddressAllocator::new(arch::PCI_MMIO_RESERVED_BASE, arch::PCI_MMIO_RESERVED_SIZE as u64)
.expect("Failed to create address allocator");
let irq_allocator = IdAllocator::new(arch::IRQ_BASE, arch::IRQ_MAX)
.expect("Failed to create IRQ allocator");
IoAllocator {
mmio_allocator: Arc::new(Mutex::new(mmio_allocator)),
irq_allocator: Arc::new(Mutex::new(irq_allocator)),
}
}
pub fn allocate_mmio(&self, size: usize) -> RangeInclusive {
let mut allocator = self.mmio_allocator.lock().unwrap();
allocator.allocate(size as u64, 4096, AllocPolicy::FirstMatch).unwrap()
}
pub fn allocate_irq(&self) -> u8 {
let mut allocator = self.irq_allocator.lock().unwrap();
allocator.allocate_id().unwrap() as u8
}
}
#[derive(Clone)]
pub struct IoManager {
memory: MemoryManager,
pio_bus: Bus,
mmio_bus: Bus,
pci_bus: Arc<Mutex<PciBus>>,
allocator: IoAllocator,
}
impl IoManager {
pub fn new(memory: MemoryManager) -> IoManager {
let pci_bus = Arc::new(Mutex::new(PciBus::new()));
let mut pio_bus = Bus::new();
pio_bus.insert(pci_bus.clone(), PciBus::PCI_CONFIG_ADDRESS as u64, 8)
.expect("Failed to add PCI configuration to PIO");
IoManager {
memory,
pio_bus,
mmio_bus: Bus::new(),
pci_bus,
allocator: IoAllocator::new(),
}
}
pub fn register_legacy_devices(&mut self, reset_evt: EventFd) {
let rtc = Arc::new(Mutex::new(Rtc::new()));
self.pio_bus.insert(rtc, 0x0070, 2).unwrap();
let i8042 = Arc::new(Mutex::new(I8042Device::new(reset_evt)));
self.pio_bus.insert(i8042, 0x0060, 8).unwrap();
}
pub fn register_serial_port(&mut self, port: SerialPort) {
let serial = SerialDevice::new(self.memory.kvm_vm().clone(), port.irq());
let serial = Arc::new(Mutex::new(serial));
self.pio_bus.insert(serial, port.io_port() as u64, 8).unwrap();
}
pub fn allocator(&self) -> IoAllocator {
self.allocator.clone()
}
pub fn mmio_read(&self, addr: u64, data: &mut [u8]) -> bool {
self.mmio_bus.read(addr, data)
}
pub fn mmio_write(&self, addr: u64, data: &[u8]) -> bool {
self.mmio_bus.write(addr, data)
}
pub fn pio_read(&self, port: u16, data: &mut [u8]) -> bool {
self.pio_bus.read(port as u64, data)
}
pub fn pio_write(&self, port: u16, data: &[u8]) -> bool {
self.pio_bus.write(port as u64, data)
}
fn pci_bus(&self) -> MutexGuard<PciBus> {
self.pci_bus.lock().unwrap()
}
pub fn pci_irqs(&self) -> Vec<PciIrq> {
self.pci_bus().pci_irqs()
}
fn allocate_pci_bars(&mut self, dev: &Arc<Mutex<dyn PciDevice+Send>>) {
let allocations = dev.lock().unwrap().bar_allocations();
if allocations.is_empty() {
return;
}
for a in allocations {
let mut allocated = Vec::new();
match a {
PciBarAllocation::Mmio(bar, size) => {
let range = self.allocator.allocate_mmio(size);
let mmio = AddressRange::new(range.start(), range.len() as usize);
dev.lock().unwrap().config_mut().set_mmio_bar(bar, mmio);
allocated.push((bar,range.start()));
let handler = Arc::new(Mutex::new(MmioHandler::new(bar, dev.clone())));
self.mmio_bus.insert(handler, range.start(), range.len()).unwrap();
}
}
dev.lock().unwrap().configure_bars(allocated);
}
}
pub fn add_pci_device(&mut self, device: Arc<Mutex<dyn PciDevice+Send>>) {
self.allocate_pci_bars(&device);
let mut pci = self.pci_bus.lock().unwrap();
pci.add_device(device);
}
pub fn add_virtio_device<D: VirtioDevice+'static>(&mut self, dev: D) -> virtio::Result<()> {
//let devtype = dev.device_type();
//let dev = Arc::new(Mutex::new(dev));
//let devstate = VirtioDeviceState::new(dev.clone(), self.memory.clone(), self.allocator.clone())?;
let irq = self.allocator.allocate_irq();
//let devstate = VirtioDeviceState::new(dev, self.memory.clone(), self.allocator.clone())?;
let devstate = VirtioDeviceState::new(dev, self.memory.clone(), irq)?;
self.add_pci_device(Arc::new(Mutex::new(devstate)));
// let mmio_range = devstate.mmio_range();
//let mut pci = self.pci_bus.lock().unwrap();
//pci.add_device(devstate);
// let mut pci_device = pci.new_device(devstate.irq() as u8, PCI_VENDOR_ID_REDHAT, devtype.device_id(), devtype.class_id());
// XXX add mmio bar
//pci_device.set_mmio_bar(0, AddressRange::new(mmio_range.start(), mmio_range.len() as usize));
// devstate.add_pci_capabilities(&mut pci_device);
// XXX add devstate to mmio bus
//self.mmio_bus.insert(Arc::new(Mutex::new(devstate)), mmio_range.start(), mmio_range.len())?;
//pci.add_device(pci_device);
Ok(())
}
}
pub struct I8042Device {
reset_evt: EventFd,
}
impl I8042Device {
fn new(reset_evt: EventFd) -> Self {
I8042Device { reset_evt }
}
}
impl BusDevice for I8042Device {
fn read(&mut self, offset: u64, data: &mut [u8]) {
if data.len() == 1 {
match offset {
0 => data[0] = 0x20,
1 => data[0] = 0x00,
_ => {},
}
}
}
fn write(&mut self, offset: u64, data: &[u8]) {
if data.len() == 1 {
if offset == 3 && data[0] == 0xfe {
if let Err(err) = self.reset_evt.write(1) {
warn!("Error triggering i8042 reset event: {}", err);
}
}
}
}
}
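
Editor's sketch (not part of the commit): wiring an IoManager up, assuming `memory` and `reset_evt` come from machine setup. Port accesses are routed through the pio Bus, which rebases them to device-relative offsets.

fn wire_io(memory: MemoryManager, reset_evt: EventFd) {
    let mut io = IoManager::new(memory);
    io.register_legacy_devices(reset_evt);
    io.register_serial_port(SerialPort::COM1); // io port 0x3f8, IRQ 4

    // A one-byte read of port 0x60 reaches I8042Device::read with offset 0.
    let mut data = [0u8; 1];
    assert!(io.pio_read(0x60, &mut data));
    assert_eq!(data[0], 0x20);
}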

src/io/mod.rs (new file, 12 lines)

@@ -0,0 +1,12 @@
pub mod bus;
pub mod busdata;
pub mod pci;
pub mod manager;
pub mod virtio;
pub use virtio::{VirtioDevice,FeatureBits,VirtioDeviceType,VirtQueue,Chain,Queues};
pub use virtio::Error as VirtioError;
pub use busdata::{ReadableInt,WriteableInt};
pub use pci::PciIrq;
// PCI Vendor id for Virtio devices
pub const PCI_VENDOR_ID_REDHAT: u16 = 0x1af4;

src/io/pci/address.rs (new file, 32 lines)

@@ -0,0 +1,32 @@
#[derive(Copy,Clone,Debug,PartialEq,Eq,PartialOrd,Ord,Hash)]
pub struct PciAddress(u16);
impl PciAddress {
pub fn empty() -> Self {
Self::new(0,0,0)
}
pub fn new(bus: u8, device: u8, function: u8) -> Self {
const DEVICE_MASK: u16 = 0x1f;
const FUNCTION_MASK: u16 = 0x07;
let bus = bus as u16;
let device = device as u16;
let function = function as u16;
let addr = bus << 8
| (device & DEVICE_MASK) << 3
| (function & FUNCTION_MASK);
PciAddress(addr)
}
pub fn device(&self) -> u8 {
// the device id occupies bits 3..7 of the packed address
((self.0 >> 3) & 0x1f) as u8
}
pub fn address(&self) -> u16 {
self.0
}
}
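
The packed layout mirrors the legacy 0xcf8 config address: bus in bits 8..15, device in bits 3..7, function in bits 0..2. A quick check (editor's addition):

fn demo() {
    let addr = PciAddress::new(0, 3, 0);
    assert_eq!(addr.address(), 0x0018); // device 3 shifted into bits 3..7
    assert_eq!(addr.device(), 3);
}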

src/io/pci/bus.rs (new file, 198 lines)

@@ -0,0 +1,198 @@
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};
use crate::io::bus::BusDevice;
use crate::io::pci::address::PciAddress;
use crate::io::pci::config::PciConfiguration;
use crate::io::pci::consts::{PCI_CLASS_BRIDGE_HOST, PCI_MAX_DEVICES, PCI_VENDOR_ID_INTEL};
use crate::io::pci::PciDevice;
/// Current address to read/write from (io port 0xcf8)
struct PciConfigAddress([u8; 4]);
impl PciConfigAddress {
fn new() -> Self {
PciConfigAddress([0u8; 4])
}
fn bus(&self) -> u8 {
self.0[2]
}
fn function(&self) -> u8 {
self.0[1] & 0x7
}
fn device(&self) -> u8 {
self.0[1] >> 3
}
fn offset(&self) -> u8 {
self.0[0] & !0x3
}
fn enabled(&self) -> bool {
self.0[3] & 0x80 != 0
}
fn pci_address(&self) -> PciAddress {
PciAddress::new(self.bus(), self.device(), self.function())
}
fn write(&mut self, offset: u64, data: &[u8]) {
let offset = offset as usize;
if offset + data.len() <= 4 {
self.0[offset..offset+data.len()]
.copy_from_slice(data)
}
}
fn read(&self, offset: u64, data: &mut [u8]) {
let offset = offset as usize;
if offset + data.len() <= 4 {
data.copy_from_slice(&self.0[offset..offset+data.len()])
}
}
}
struct PciRootDevice(PciConfiguration);
impl PciRootDevice {
fn new() -> Self {
let config = PciConfiguration::new(0, PCI_VENDOR_ID_INTEL, 0, PCI_CLASS_BRIDGE_HOST);
PciRootDevice(config)
}
}
impl PciDevice for PciRootDevice {
fn config(&self) -> &PciConfiguration {
&self.0
}
fn config_mut(&mut self) -> &mut PciConfiguration {
&mut self.0
}
}
pub struct PciBus {
devices: BTreeMap<PciAddress, Arc<Mutex<dyn PciDevice>>>,
config_address: PciConfigAddress,
used_device_ids: Vec<bool>,
}
impl PciBus {
pub const PCI_CONFIG_ADDRESS: u16 = 0xcf8;
pub fn new() -> PciBus {
let mut pci = PciBus {
devices: BTreeMap::new(),
config_address: PciConfigAddress::new(),
used_device_ids: vec![false; PCI_MAX_DEVICES],
};
let root = PciRootDevice::new();
pci.add_device(Arc::new(Mutex::new(root)));
pci
}
pub fn add_device(&mut self, device: Arc<Mutex<dyn PciDevice>>) {
let id = self.allocate_id().unwrap();
let address = PciAddress::new(0, id, 0);
device.lock().unwrap().config_mut().set_address(address);
self.devices.insert(address, device);
}
pub fn pci_irqs(&self) -> Vec<PciIrq> {
let mut irqs = Vec::new();
for (addr, dev) in &self.devices {
let lock = dev.lock().unwrap();
if let Some(irq) = lock.irq() {
irqs.push(PciIrq::new(addr.device(), irq));
}
}
irqs
}
fn allocate_id(&mut self) -> Option<u8> {
for i in 0..PCI_MAX_DEVICES {
if !self.used_device_ids[i] {
self.used_device_ids[i] = true;
return Some(i as u8)
}
}
None
}
fn is_in_range(base: u64, offset: u64, len: usize) -> bool {
let end = offset + len as u64;
offset >= base && end <= (base + 4)
}
fn is_config_address(offset: u64, len: usize) -> bool {
Self::is_in_range(0, offset, len)
}
fn is_config_data(offset: u64, len: usize) -> bool {
Self::is_in_range(4, offset, len)
}
fn current_config_device(&self) -> Option<Arc<Mutex<dyn PciDevice>>> {
if self.config_address.enabled() {
let addr = self.config_address.pci_address();
self.devices.get(&addr).cloned()
} else {
None
}
}
}
impl BusDevice for PciBus {
fn read(&mut self, offset: u64, data: &mut [u8]) {
if PciBus::is_config_address(offset, data.len()) {
self.config_address.read(offset, data);
} else if PciBus::is_config_data(offset, data.len()) {
if let Some(dev) = self.current_config_device() {
let lock = dev.lock().unwrap();
let offset = (offset - 4) + self.config_address.offset() as u64;
lock.config().read(offset, data)
} else {
data.fill(0xff)
}
}
}
fn write(&mut self, offset: u64, data: &[u8]) {
if PciBus::is_config_address(offset, data.len()) {
self.config_address.write(offset, data)
} else if PciBus::is_config_data(offset, data.len()) {
if let Some(dev) = self.current_config_device() {
let mut lock = dev.lock().unwrap();
let offset = (offset - 4) + self.config_address.offset() as u64;
lock.config_mut().write(offset, data)
}
}
}
}
#[derive(Debug)]
pub struct PciIrq {
pci_id: u8,
int_pin: u8,
irq: u8,
}
impl PciIrq {
fn new(pci_id: u8, irq: u8) -> PciIrq {
PciIrq {
pci_id,
int_pin: 1,
irq,
}
}
pub fn src_bus_irq(&self) -> u8 {
(self.pci_id << 2) | (self.int_pin - 1)
}
pub fn irq_line(&self) -> u8 {
self.irq
}
}
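
Editor's sketch (not part of the commit): the 0xcf8/0xcfc handshake as seen through the BusDevice interface above. The guest writes an enabled address to offset 0, then reads the selected config dword at offset 4.

fn probe_host_bridge() {
    let mut bus = PciBus::new();

    // Select bus 0, device 0, function 0, register 0, with the enable bit (bit 31) set.
    bus.write(0, &0x8000_0000u32.to_le_bytes());

    // The host bridge added in PciBus::new() answers with PCI_VENDOR_ID_INTEL.
    let mut dword = [0u8; 4];
    bus.read(4, &mut dword);
    assert_eq!(u16::from_le_bytes([dword[0], dword[1]]), 0x8086);
}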

src/io/pci/config.rs (new file, 248 lines)

@@ -0,0 +1,248 @@
use crate::io::pci::address::PciAddress;
use crate::io::pci::consts::{PCI_BAR0, PCI_BAR5, PCI_CACHE_LINE_SIZE, PCI_CAP_BASE_OFFSET, PCI_CAP_ID_VENDOR, PCI_CAPABILITY_LIST, PCI_CLASS_DEVICE, PCI_CLASS_REVISION, PCI_COMMAND, PCI_COMMAND_IO, PCI_COMMAND_MEMORY, PCI_DEVICE_ID, PCI_INTERRUPT_LINE, PCI_INTERRUPT_PIN, PCI_STATUS, PCI_STATUS_CAP_LIST, PCI_SUBSYSTEM_ID, PCI_VENDOR_ID};
use crate::io::pci::device::PciBar;
use crate::memory::AddressRange;
use crate::util::{ByteBuffer,Writeable};
const PCI_CONFIG_SPACE_SIZE: usize = 256;
const MAX_CAPABILITY_COUNT:usize = 16; // arbitrary
pub struct PciCapability<'a> {
config: &'a mut PciConfiguration,
buffer: ByteBuffer<Vec<u8>>,
}
impl <'a> PciCapability<'a> {
pub fn new_vendor_capability(config: &'a mut PciConfiguration) -> Self {
let mut buffer = ByteBuffer::new_empty();
buffer.write(PCI_CAP_ID_VENDOR);
buffer.write(0u8);
PciCapability { config, buffer }
}
pub fn write<V: Writeable>(&mut self, val: V) {
self.buffer.write(val);
}
pub fn store(&mut self) {
let offset = self.config.next_capability_offset;
self.config.update_capability_chain(self.buffer.len());
self.config.write_bytes(offset, self.buffer.as_ref());
}
}
pub struct PciConfiguration {
address: PciAddress,
irq: u8,
bytes: [u8; PCI_CONFIG_SPACE_SIZE],
bar_write_masks: [u32; 6],
next_capability_offset: usize,
}
impl PciConfiguration {
pub fn new(irq: u8, vendor: u16, device: u16, class_id: u16) -> Self {
let mut config = PciConfiguration {
address: PciAddress::empty(),
irq,
bytes: [0; PCI_CONFIG_SPACE_SIZE],
bar_write_masks: [0; 6],
next_capability_offset: PCI_CAP_BASE_OFFSET,
};
config.buffer()
.write_at(PCI_VENDOR_ID, vendor)
.write_at(PCI_DEVICE_ID, device)
.write_at(PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY)
.write_at(PCI_CLASS_REVISION, u8::from(1))
.write_at(PCI_CLASS_DEVICE, class_id)
.write_at(PCI_INTERRUPT_PIN, u8::from(1))
.write_at(PCI_INTERRUPT_LINE, irq)
.write_at(PCI_SUBSYSTEM_ID, 0x40u16);
config
}
pub fn address(&self) -> PciAddress {
self.address
}
pub fn set_address(&mut self, address: PciAddress) {
self.address = address;
}
pub fn irq(&self) -> u8 {
self.irq
}
fn buffer(&mut self) -> ByteBuffer<&mut[u8]> {
ByteBuffer::from_bytes_mut(&mut self.bytes).little_endian()
}
fn write_bytes(&mut self, offset: usize, bytes: &[u8]) {
(&mut self.bytes[offset..offset+bytes.len()])
.copy_from_slice(bytes)
}
fn read_bytes(&self, offset: usize, bytes: &mut [u8]) {
bytes.copy_from_slice(&self.bytes[offset..offset+bytes.len()]);
}
fn bar_mask(&self, offset: usize) -> Option<u32> {
fn is_bar_offset(offset: usize) -> bool {
offset >= PCI_BAR0 && offset < (PCI_BAR5 + 4)
}
fn bar_idx(offset: usize) -> usize {
(offset - PCI_BAR0) / 4
}
if is_bar_offset(offset) {
Some(self.bar_write_masks[bar_idx(offset)])
} else {
None
}
}
fn write_masked_byte(&mut self, offset: usize, mask: u8, new_byte: u8) {
let orig = self.bytes[offset];
self.bytes[offset] = (orig & !mask) | (new_byte & mask);
}
fn write_bar(&mut self, offset: usize, data: &[u8]) {
let mask_bytes = match self.bar_mask(offset) {
Some(mask) if mask != 0 => mask.to_le_bytes(),
_ => return,
};
let mod4 = offset % 4;
let mask_bytes = &mask_bytes[mod4..];
assert!(mask_bytes.len() >= data.len());
for idx in 0..data.len() {
self.write_masked_byte(offset + idx, mask_bytes[idx], data[idx])
}
}
fn write_config(&mut self, offset: usize, data: &[u8]) {
let size = data.len();
match offset {
PCI_COMMAND | PCI_STATUS if size == 2 => {
self.write_bytes(offset, data)
},
PCI_CACHE_LINE_SIZE if size == 1 => {
self.write_bytes(offset, data)
},
PCI_BAR0..=0x27 => {
self.write_bar(offset, data)
}, // bars
_ => {},
}
}
fn is_valid_access(offset: u64, size: usize) -> bool {
fn check_aligned_range(offset: u64, size: usize) -> bool {
let offset = offset as usize;
offset + size <= PCI_CONFIG_SPACE_SIZE && offset % size == 0
}
match size {
4 => check_aligned_range(offset, 4),
2 => check_aligned_range(offset, 2),
1 => check_aligned_range(offset, 1),
_ => false,
}
}
fn next_capability(&self, offset: usize) -> Option<usize> {
fn is_valid_cap_offset(offset: usize) -> bool {
offset < 254 && offset >= PCI_CAP_BASE_OFFSET
}
if is_valid_cap_offset(offset) {
Some(self.bytes[offset + 1] as usize)
} else {
None
}
}
fn update_next_capability_offset(&mut self, caplen: usize) {
let aligned = (caplen + 3) & !3;
self.next_capability_offset += aligned;
assert!(self.next_capability_offset < PCI_CONFIG_SPACE_SIZE);
}
fn update_capability_chain(&mut self, caplen: usize) {
let next_offset = self.next_capability_offset as u8;
self.update_next_capability_offset(caplen);
let mut cap_ptr = self.bytes[PCI_CAPABILITY_LIST] as usize;
if cap_ptr == 0 {
self.bytes[PCI_CAPABILITY_LIST] = next_offset;
self.bytes[PCI_STATUS] |= PCI_STATUS_CAP_LIST as u8;
return;
}
for _ in 0..MAX_CAPABILITY_COUNT {
if let Some(next) = self.next_capability(cap_ptr) {
if next == 0 {
self.bytes[cap_ptr + 1] = next_offset;
return;
}
cap_ptr = next;
}
}
}
pub fn new_capability(&mut self) -> PciCapability {
PciCapability::new_vendor_capability(self)
}
pub fn set_mmio_bar(&mut self, bar: PciBar, range: AddressRange) {
assert!(range.is_naturally_aligned(), "cannot set_mmio_bar() because mmio range is not naturally aligned");
self.bar_write_masks[bar.idx()] = !((range.size() as u32) - 1);
let offset = PCI_BAR0 + (bar.idx() * 4);
let address = (range.base() as u32).to_le_bytes();
self.write_bytes(offset, &address);
}
pub fn read(&self, offset: u64, data: &mut [u8]) {
if Self::is_valid_access(offset, data.len()) {
self.read_bytes(offset as usize, data)
} else {
data.fill(0xff)
}
}
pub fn write(&mut self, offset: u64, data: &[u8]) {
if Self::is_valid_access(offset, data.len()) {
self.write_config(offset as usize, data);
}
}
}
/*
impl BusDevice for PciConfiguration {
fn read(&mut self, offset: u64, data: &mut [u8]) {
if Self::is_valid_access(offset, data.len()) {
self.read_bytes(offset as usize, data)
} else {
data.fill(0xff)
}
}
fn write(&mut self, offset: u64, data: &[u8]) {
if Self::is_valid_access(offset, data.len()) {
self.write_config(offset as usize, data);
}
}
}
*/
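
Editor's sketch (not part of the commit), assuming the crate's Writeable trait is implemented for the unsigned integer types: new_capability() seeds the two-byte header (cap id, next pointer), write() appends payload fields, and store() links the buffer into the capability chain and sets PCI_STATUS_CAP_LIST.

fn add_vendor_cap() {
    let mut config = PciConfiguration::new(5, 0x1af4, 0x1042, 0x0100);
    let mut cap = config.new_capability();
    cap.write(8u8);       // hypothetical cap_len field
    cap.write(0xbeefu16); // hypothetical vendor-defined payload
    cap.store();          // first capability lands at PCI_CAP_BASE_OFFSET (0x40)
}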

src/io/pci/consts.rs (new file, 33 lines)

@@ -0,0 +1,33 @@
// Maximum number of logical devices on a PCI bus
pub const PCI_MAX_DEVICES: usize = 32;
// Vendor specific PCI capabilities
pub const PCI_CAP_ID_VENDOR: u8 = 0x09;
pub const PCI_CAP_BASE_OFFSET: usize = 0x40;
pub const PCI_VENDOR_ID: usize = 0x00;
pub const PCI_DEVICE_ID: usize = 0x02;
pub const PCI_COMMAND: usize = 0x04;
pub const PCI_COMMAND_IO: u16 = 0x01;
pub const PCI_COMMAND_MEMORY: u16 = 0x02;
pub const PCI_STATUS: usize = 0x06;
pub const PCI_BAR0: usize = 0x10;
pub const PCI_BAR5: usize = 0x24;
pub const PCI_STATUS_CAP_LIST: u16 = 0x10;
pub const PCI_CLASS_REVISION: usize = 0x08;
pub const PCI_CLASS_DEVICE: usize = 0x0a;
pub const PCI_CACHE_LINE_SIZE: usize = 0x0c;
pub const _PCI_SUBSYSTEM_VENDOR_ID: usize = 0x2c;
pub const PCI_SUBSYSTEM_ID: usize = 0x2e;
pub const PCI_CAPABILITY_LIST: usize = 0x34;
pub const PCI_INTERRUPT_LINE: usize = 0x3C;
pub const PCI_INTERRUPT_PIN: usize = 0x3D;
pub const PCI_VENDOR_ID_INTEL: u16 = 0x8086;
pub const PCI_CLASS_BRIDGE_HOST: u16 = 0x0600;

src/io/pci/device.rs (new file, 68 lines)

@@ -0,0 +1,68 @@
use std::sync::{Arc, Mutex};
use crate::io::bus::BusDevice;
use crate::io::pci::PciConfiguration;
#[derive(Copy,Clone,Eq,PartialEq)]
#[repr(u8)]
pub enum PciBar {
Bar0 = 0,
Bar1 = 1,
Bar2 = 2,
Bar3 = 3,
Bar4 = 4,
Bar5 = 5,
}
impl PciBar {
pub fn idx(&self) -> usize {
*self as usize
}
}
pub enum PciBarAllocation {
Mmio(PciBar, usize),
}
pub trait PciDevice: Send {
fn config(&self) -> &PciConfiguration;
fn config_mut(&mut self) -> &mut PciConfiguration;
fn read_bar(&mut self, bar: PciBar, offset: u64, data: &mut [u8]) {
let (_,_,_) = (bar, offset, data);
}
fn write_bar(&mut self, bar: PciBar, offset: u64, data: &[u8]) {
let (_,_,_) = (bar,offset, data);
}
fn irq(&self) -> Option<u8> { None }
fn bar_allocations(&self) -> Vec<PciBarAllocation> { vec![] }
fn configure_bars(&mut self, allocations: Vec<(PciBar, u64)>) { let _ = allocations; }
}
pub struct MmioHandler {
bar: PciBar,
device: Arc<Mutex<dyn PciDevice+Send>>
}
impl MmioHandler {
pub fn new(bar: PciBar, device: Arc<Mutex<dyn PciDevice+Send>>) -> Self {
MmioHandler {
bar, device,
}
}
}
impl BusDevice for MmioHandler {
fn read(&mut self, offset: u64, data: &mut [u8]) {
let mut lock = self.device.lock().unwrap();
lock.read_bar(self.bar, offset, data)
}
fn write(&mut self, offset: u64, data: &[u8]) {
let mut lock = self.device.lock().unwrap();
lock.write_bar(self.bar, offset, data)
}
}

src/io/pci/mod.rs (new file, 10 lines)

@@ -0,0 +1,10 @@
mod address;
mod bus;
mod config;
mod consts;
mod device;
pub use bus::{PciBus,PciIrq};
pub use config::{PciCapability,PciConfiguration};
pub use address::PciAddress;
pub use device::{PciDevice,PciBar,PciBarAllocation,MmioHandler};

src/io/virtio/consts.rs (new file, 74 lines)

@@ -0,0 +1,74 @@
#[derive(Copy,Clone,Eq,PartialEq,Debug)]
#[repr(u32)]
pub enum VirtioDeviceType {
Net = 1,
Block = 2,
Console = 3,
Rng = 4,
NineP = 9,
Wl = 63,
}
impl VirtioDeviceType {
// Base PCI device id for Virtio devices
const PCI_VIRTIO_DEVICE_ID_BASE: u16 = 0x1040;
const PCI_CLASS_NETWORK_ETHERNET: u16 = 0x0200;
const PCI_CLASS_STORAGE_SCSI: u16 = 0x0100;
const PCI_CLASS_COMMUNICATION_OTHER: u16 = 0x0780;
const PCI_CLASS_OTHERS: u16 = 0xff;
const PCI_CLASS_STORAGE_OTHER: u16 = 0x0180;
pub fn device_id(&self) -> u16 {
Self::PCI_VIRTIO_DEVICE_ID_BASE + (*self as u16)
}
pub fn class_id(&self) -> u16 {
match self {
VirtioDeviceType::Net => Self::PCI_CLASS_NETWORK_ETHERNET,
VirtioDeviceType::Block => Self::PCI_CLASS_STORAGE_SCSI,
VirtioDeviceType::Console => Self::PCI_CLASS_COMMUNICATION_OTHER,
VirtioDeviceType::Rng => Self::PCI_CLASS_OTHERS,
VirtioDeviceType::NineP => Self::PCI_CLASS_STORAGE_OTHER,
VirtioDeviceType::Wl => Self::PCI_CLASS_OTHERS,
}
}
}
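// Editor's note: e.g. VirtioDeviceType::NineP is exposed as PCI device id
// 0x1040 + 9 = 0x1049 with class id 0x0180 (storage, other).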
pub const VIRTIO_MMIO_AREA_SIZE: usize = 4096;
// Offsets and sizes for each structure in MMIO area
pub const VIRTIO_MMIO_OFFSET_COMMON_CFG : u64 = 0; // Common configuration offset
pub const VIRTIO_MMIO_OFFSET_ISR : u64 = 56; // ISR register offset
pub const VIRTIO_MMIO_OFFSET_NOTIFY : u64 = 0x400; // Notify area offset
pub const VIRTIO_MMIO_OFFSET_DEV_CFG : u64 = 0x800; // Device specific configuration offset
pub const VIRTIO_MMIO_COMMON_CFG_SIZE: u64 = 56; // Common configuration size
pub const VIRTIO_MMIO_NOTIFY_SIZE : u64 = 0x400; // Notify area size
pub const VIRTIO_MMIO_ISR_SIZE : u64 = 4; // ISR register size
// Common configuration status bits
pub const _VIRTIO_CONFIG_S_ACKNOWLEDGE : u8 = 1;
pub const _VIRTIO_CONFIG_S_DRIVER : u8 = 2;
pub const VIRTIO_CONFIG_S_DRIVER_OK : u8 = 4;
pub const VIRTIO_CONFIG_S_FEATURES_OK : u8 = 8;
pub const VIRTIO_CONFIG_S_FAILED : u8 = 0x80;
pub const MAX_QUEUE_SIZE: u16 = 1024;
pub const VIRTIO_NO_MSI_VECTOR: u16 = 0xFFFF;
// Bar number 0 is used for Virtio MMIO area
pub const VIRTIO_MMIO_BAR: usize = 0;
// Virtio PCI capability types
pub const VIRTIO_PCI_CAP_COMMON_CFG : u8 = 1;
pub const VIRTIO_PCI_CAP_NOTIFY_CFG : u8 = 2;
pub const VIRTIO_PCI_CAP_ISR_CFG : u8 = 3;
pub const VIRTIO_PCI_CAP_DEVICE_CFG : u8 = 4;

src/io/virtio/device.rs (new file, 430 lines)

@@ -0,0 +1,430 @@
use std::ops::Range;
use std::sync::{Arc, Mutex, MutexGuard};
use byteorder::{ByteOrder, LittleEndian};
use crate::io::busdata::{ReadableInt, WriteableInt};
use crate::io::pci::{PciBar, PciBarAllocation, PciConfiguration, PciDevice};
use crate::io::virtio::consts::*;
use crate::io::virtio::features::FeatureBits;
use crate::io::virtio::queues::Queues;
use crate::io::virtio::Result;
use crate::io::PCI_VENDOR_ID_REDHAT;
use crate::memory::{AddressRange, MemoryManager};
pub trait VirtioDevice: Send {
fn features(&self) -> &FeatureBits;
fn features_ok(&self) -> bool { true }
fn queue_sizes(&self) -> &[u16];
fn device_type(&self) -> VirtioDeviceType;
fn config_size(&self) -> usize { 0 }
fn read_config(&self, offset: u64, data: &mut [u8]) {
let (_,_) = (offset, data);
}
fn write_config(&mut self, offset: u64, data: &[u8]) {
let (_,_) = (offset, data);
}
fn start(&mut self, queues: &Queues);
}
pub struct VirtioDeviceState {
pci_config: PciConfiguration,
device: Arc<Mutex<dyn VirtioDevice>>,
status: u8,
queues: Queues,
}
impl VirtioDeviceState {
pub fn new<T: VirtioDevice+'static>(device: T, memory: MemoryManager, irq: u8) -> Result<Self> {
let devtype = device.device_type();
let config_size = device.config_size();
let device = Arc::new(Mutex::new(device));
let queues = Queues::new(memory, irq)?;
let mut pci_config = PciConfiguration::new(queues.irq(), PCI_VENDOR_ID_REDHAT, devtype.device_id(), devtype.class_id());
Self::add_pci_capabilities::<T>(&mut pci_config, config_size);
Ok(VirtioDeviceState {
pci_config,
device,
status: 0,
queues,
})
}
fn add_pci_capabilities<T: VirtioDevice>(pci_config: &mut PciConfiguration, config_size: usize) {
VirtioPciCapability::new(VIRTIO_PCI_CAP_COMMON_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_COMMON_CFG, VIRTIO_MMIO_COMMON_CFG_SIZE)
.store(pci_config);
VirtioPciCapability::new(VIRTIO_PCI_CAP_ISR_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_ISR, VIRTIO_MMIO_ISR_SIZE)
.store(pci_config);
VirtioPciCapability::new(VIRTIO_PCI_CAP_NOTIFY_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_NOTIFY, VIRTIO_MMIO_NOTIFY_SIZE)
.set_extra_word(4)
.store(pci_config);
if config_size > 0 {
VirtioPciCapability::new(VIRTIO_PCI_CAP_DEVICE_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_DEV_CFG, config_size as u64)
.store(pci_config);
}
}
fn device(&self) -> MutexGuard<dyn VirtioDevice + 'static> {
self.device.lock().unwrap()
}
fn reset(&mut self) {
self.queues.reset();
self.device().features().reset();
self.status = 0;
}
fn status_write(&mut self, val: u8) {
let new_bits = val & !self.status;
let has_new_bit = |bit| -> bool {
new_bits & bit != 0
};
self.status |= new_bits;
if val == 0 {
self.reset();
} else if has_new_bit(VIRTIO_CONFIG_S_FEATURES_OK) {
// 2.2.2: The device SHOULD accept any valid subset of features the driver accepts,
// otherwise it MUST fail to set the FEATURES_OK device status bit when the driver
// writes it.
            if !self.device().features_ok() {
                // Fail the handshake by clearing only the FEATURES_OK bit
                self.status &= !VIRTIO_CONFIG_S_FEATURES_OK;
            }
} else if has_new_bit(VIRTIO_CONFIG_S_DRIVER_OK) {
let features = self.device().features().guest_value();
if let Err(err) = self.queues.configure_queues(features) {
warn!("Error configuring virtqueue: {}", err);
} else {
self.device().start(&self.queues)
}
        } else if has_new_bit(VIRTIO_CONFIG_S_FAILED) {
            warn!("Driver set VIRTIO_CONFIG_S_FAILED device status bit");
        }
}
fn common_config_write(&mut self, offset: u64, val: WriteableInt) {
match val {
WriteableInt::Byte(n) => match offset {
/* device_status */
20 => self.status_write(n),
_ => warn!("VirtioDeviceState: common_config_write: unhandled byte offset {}", offset),
},
WriteableInt::Word(n) => match offset {
/* queue_select */
22 => self.queues.select(n),
/* queue_size */
24 => self.queues.set_size(n),
/* queue_enable */
28 => self.queues.enable_current(),
_ => warn!("VirtioDeviceState: common_config_write: unhandled word offset {}", offset),
}
WriteableInt::DWord(n) => match offset {
/* device_feature_select */
0 => self.device().features().set_device_selected(n),
/* guest_feature_select */
8 => self.device().features().set_guest_selected(n),
/* guest_feature */
12 => self.device().features().write_guest_word(n),
/* queue_desc_lo */
32 => self.queues.set_current_descriptor_area(n, false),
/* queue_desc_hi */
36 => self.queues.set_current_descriptor_area(n, true),
/* queue_avail_lo */
40 => self.queues.set_avail_area(n, false),
/* queue_avail_hi */
44 => self.queues.set_avail_area(n, true),
/* queue_used_lo */
48 => self.queues.set_used_area(n, false),
/* queue_used_hi */
52 => self.queues.set_used_area(n, true),
_ => warn!("VirtioDeviceState: common_config_write: unhandled dword offset {}", offset),
},
WriteableInt::QWord(_) => warn!("VirtioDeviceState: common_config_write: unhandled qword offset {}", offset),
WriteableInt::Data(bs) => warn!("VirtioDeviceState: common_config_write: unhandled raw bytes offset {}, len {}", offset, bs.len()),
}
}
fn common_config_read(&self, offset: u64) -> ReadableInt {
match offset {
/* device_feature_select */
0 => self.device().features().device_selected().into(),
/* device_feature */
4 => self.device().features().read_device_word().into(),
/* guest_feature_select */
8 => self.device().features().guest_selected().into(),
/* guest_feature */
12 => self.device().features().read_guest_word().into(),
/* msix_config */
16 => VIRTIO_NO_MSI_VECTOR.into(),
/* num_queues */
18 => self.queues.num_queues().into(),
/* device_status */
20 => self.status.into(),
/* config_generation */
21 => (0u8).into(),
/* queue_select */
22 => self.queues.selected_queue().into(),
/* queue_size */
24 => self.queues.queue_size().into(),
/* queue_msix_vector */
26 => VIRTIO_NO_MSI_VECTOR.into(),
/* queue_enable */
28 => if self.queues.is_current_enabled() { 1u16.into() } else { 0u16.into() },
/* queue_notify_off */
30 => self.queues.selected_queue().into(),
/* queue_desc_lo */
32 => self.queues.get_current_descriptor_area(false).into(),
/* queue_desc_hi */
36 => self.queues.get_current_descriptor_area(true).into(),
/* queue_avail_lo */
40 => self.queues.get_avail_area(false).into(),
/* queue_avail_hi */
44 => self.queues.get_avail_area(true).into(),
/* queue_used_lo */
48 => self.queues.get_used_area(false).into(),
/* queue_used_hi */
52 => self.queues.get_used_area(true).into(),
_ => ReadableInt::new_dword(0),
}
}
fn isr_read(&self) -> u8 {
self.queues.isr_read() as u8
}
fn is_device_config_range(&self, offset: u64, len: usize) -> bool {
let dev = self.device();
if dev.config_size() > 0 {
let range = AddressRange::new(VIRTIO_MMIO_OFFSET_DEV_CFG, dev.config_size());
range.contains(offset, len)
} else {
false
}
}
fn is_common_cfg_range(&self, offset: u64, len: usize) -> bool {
AddressRange::new(VIRTIO_MMIO_OFFSET_COMMON_CFG, VIRTIO_MMIO_COMMON_CFG_SIZE as usize)
.contains(offset, len)
}
}
impl PciDevice for VirtioDeviceState {
fn config(&self) -> &PciConfiguration {
&self.pci_config
}
fn config_mut(&mut self) -> &mut PciConfiguration {
&mut self.pci_config
}
fn read_bar(&mut self, bar: PciBar, offset: u64, data: &mut [u8]) {
if bar != PciBar::Bar0 {
warn!("Virtio PciDevice: read_bar() expected bar0!");
return;
}
if self.is_common_cfg_range(offset, data.len()) {
let v = self.common_config_read(offset);
v.read(data);
} else if offset == VIRTIO_MMIO_OFFSET_ISR && data.len() == 1 {
data[0] = self.isr_read();
} else if self.is_device_config_range(offset, data.len()) {
let dev = self.device();
dev.read_config(offset - VIRTIO_MMIO_OFFSET_DEV_CFG, data);
}
}
fn write_bar(&mut self, bar: PciBar, offset: u64, data: &[u8]) {
if bar != PciBar::Bar0 {
warn!("Virtio PciDevice: write_bar() expected bar0!");
return;
}
if self.is_common_cfg_range(offset, data.len()) {
let data = WriteableInt::from(data);
self.common_config_write(offset, data);
} else if self.is_device_config_range(offset, data.len()) {
let mut dev = self.device();
dev.write_config(offset - VIRTIO_MMIO_OFFSET_DEV_CFG, data);
}
}
fn irq(&self) -> Option<u8> {
Some(self.queues.irq())
}
fn bar_allocations(&self) -> Vec<PciBarAllocation> {
vec![PciBarAllocation::Mmio(PciBar::Bar0, VIRTIO_MMIO_AREA_SIZE)]
}
fn configure_bars(&mut self, allocations: Vec<(PciBar, u64)>) {
for (bar,base) in allocations {
if bar == PciBar::Bar0 {
let queue_sizes = self.device().queue_sizes().to_vec();
if let Err(e) = self.queues.create_queues(base, &queue_sizes) {
warn!("Error creating queues: {}", e);
}
} else {
warn!("Virtio PciDevice: Cannot configure unexpected PCI bar: {}", bar.idx());
}
}
}
}
struct VirtioPciCapability {
vtype: u8,
size: u8,
mmio_offset: u32,
mmio_len: u32,
extra_word: Option<u32>,
}
impl VirtioPciCapability {
fn new(vtype: u8) -> VirtioPciCapability{
VirtioPciCapability {
vtype,
size: 16,
mmio_offset: 0,
mmio_len: 0,
extra_word: None
}
}
fn set_mmio_range(&mut self, offset: u64, len: u64) -> &mut VirtioPciCapability {
self.mmio_offset = offset as u32;
self.mmio_len = len as u32;
self
}
fn set_extra_word(&mut self, val: u32) -> &mut VirtioPciCapability {
self.size += 4;
self.extra_word = Some(val);
self
}
fn store(&self, pci_config: &mut PciConfiguration) {
/*
* struct virtio_pci_cap {
* u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
* u8 cap_next; /* Generic PCI field: next ptr. */
* u8 cap_len; /* Generic PCI field: capability length */
* u8 cfg_type; /* Identifies the structure. */
* u8 bar; /* Where to find it. */
* u8 padding[3]; /* Pad to full dword. */
* le32 offset; /* Offset within bar. */
* le32 length; /* Length of the structure, in bytes. */
* };
*/
let mut cap = pci_config.new_capability();
cap.write(self.size);
cap.write(self.vtype);
// Also fills the padding bytes
cap.write(VIRTIO_MMIO_BAR as u32);
cap.write(self.mmio_offset);
cap.write(self.mmio_len);
if let Some(word) = self.extra_word {
cap.write(word);
}
cap.store();
}
}
pub struct DeviceConfigArea {
buffer: Vec<u8>,
write_filter: DeviceConfigWriteFilter,
}
#[allow(dead_code)]
impl DeviceConfigArea {
pub fn new(size: usize) -> Self {
DeviceConfigArea{
buffer: vec![0u8; size],
write_filter: DeviceConfigWriteFilter::new(size),
}
}
pub fn read_config(&self, offset: u64, data: &mut [u8]) {
let offset = offset as usize;
if offset + data.len() <= self.buffer.len() {
data.copy_from_slice(&self.buffer[offset..offset+data.len()]);
}
}
pub fn write_config(&mut self, offset: u64, data: &[u8]) {
let offset = offset as usize;
if self.write_filter.is_writeable(offset, data.len()) {
self.buffer[offset..offset+data.len()].copy_from_slice(data);
}
}
pub fn set_writeable(&mut self, offset: usize, size: usize) {
self.write_filter.set_writable(offset, size)
}
pub fn write_u8(&mut self, offset: usize, val: u8) {
assert!(offset + 1 <= self.buffer.len());
self.buffer[offset] = val;
}
pub fn write_u16(&mut self, offset: usize, val: u16) {
assert!(offset + 2 <= self.buffer.len());
LittleEndian::write_u16(&mut self.buffer[offset..], val);
}
pub fn write_u32(&mut self, offset: usize, val: u32) {
assert!(offset + 4 <= self.buffer.len());
LittleEndian::write_u32(&mut self.buffer[offset..], val);
}
pub fn write_u64(&mut self, offset: usize, val: u64) {
assert!(offset + 8 <= self.buffer.len());
LittleEndian::write_u64(&mut self.buffer[offset..], val);
}
pub fn write_bytes(&mut self, offset: usize, bytes: &[u8]) {
assert!(offset + bytes.len() <= self.buffer.len());
self.buffer[offset..offset + bytes.len()].copy_from_slice(bytes);
}
}
struct DeviceConfigWriteFilter {
size: usize,
ranges: Vec<Range<usize>>,
}
impl DeviceConfigWriteFilter {
fn new(size: usize) -> Self {
DeviceConfigWriteFilter { size, ranges: Vec::new() }
}
fn set_writable(&mut self, offset: usize, size: usize) {
let end = offset + size;
self.ranges.push(offset..end);
}
fn is_writeable(&self, offset: usize, size: usize) -> bool {
if offset + size > self.size {
false
} else {
let last = offset + size - 1;
self.ranges.iter().any(|r| r.contains(&offset) && r.contains(&last))
}
}
}

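To see how the pieces of device.rs fit together, here is a minimal sketch of a VirtioDevice implementation. ExampleRng is invented for illustration, and flush_chain() is assumed to be the Chain completion method (the put_used body appears in the chain.rs hunk below):

struct ExampleRng {
    features: FeatureBits,
}

impl VirtioDevice for ExampleRng {
    fn features(&self) -> &FeatureBits {
        &self.features
    }

    fn queue_sizes(&self) -> &[u16] {
        &[VirtQueue::DEFAULT_QUEUE_SIZE]
    }

    fn device_type(&self) -> VirtioDeviceType {
        VirtioDeviceType::Rng
    }

    fn start(&mut self, queues: &Queues) {
        let vq = queues.get_queue(0);
        std::thread::spawn(move || {
            // Serve each guest request by filling the writeable descriptors
            vq.on_each_chain(|mut chain| {
                while chain.w8(0).is_ok() {}   // zeros stand in for entropy here
                chain.flush_chain();           // assumed completion method (wraps put_used)
            });
        });
    }
}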
110
src/io/virtio/features.rs Normal file

@ -0,0 +1,110 @@
use std::sync::{Arc, Mutex, MutexGuard};
#[derive(Copy,Clone)]
#[repr(u64)]
pub enum ReservedFeatureBit {
_IndirectDesc = 1 << 28,
EventIdx = 1 << 29,
Version1 = 1 << 32,
}
impl ReservedFeatureBit {
pub fn is_set_in(&self, flags: u64) -> bool {
flags & (*self as u64) != 0
}
}
#[derive(Clone)]
pub struct FeatureBits {
device_bits: Arc<Mutex<Inner>>,
guest_bits: Arc<Mutex<Inner>>,
}
struct Inner {
bits: u64,
selected: u32,
}
impl Inner {
fn new(bits: u64) -> Arc<Mutex<Self>> {
Arc::new(Mutex::new(Inner { bits, selected: 0 }))
}
}
impl FeatureBits {
pub fn new_default(device_bits: u64) -> Self {
FeatureBits {
guest_bits: Inner::new(0),
device_bits: Inner::new(ReservedFeatureBit::Version1 as u64 | device_bits),
}
}
pub fn reset(&self) {
let mut guest = self.guest();
guest.bits = 0;
guest.selected = 0;
}
fn guest(&self) -> MutexGuard<Inner> {
self.guest_bits.lock().unwrap()
}
fn device(&self) -> MutexGuard<Inner> {
self.device_bits.lock().unwrap()
}
pub fn guest_selected(&self) -> u32 {
self.guest().selected
}
pub fn guest_value(&self) -> u64 {
self.guest().bits
}
pub fn has_guest_bit(&self, bit: u64) -> bool {
self.guest_value() & bit == bit
}
pub fn set_guest_selected(&self, val: u32) {
self.guest().selected = val;
}
pub fn write_guest_word(&self, val: u32) {
const MASK_LOW_32: u64 = (1u64 << 32) - 1;
const MASK_HI_32: u64 = MASK_LOW_32 << 32;
let mut inner = self.guest();
let val = u64::from(val);
match inner.selected {
0 => inner.bits = (inner.bits & MASK_HI_32) | val,
1 => inner.bits = val << 32 | (inner.bits & MASK_LOW_32),
_ => (),
}
}
pub fn read_guest_word(&self) -> u32 {
let inner = self.guest();
match inner.selected {
0 => inner.bits as u32,
1 => (inner.bits >> 32) as u32,
_ => 0,
}
}
pub fn set_device_selected(&self, val: u32) {
self.device().selected = val;
}
pub fn device_selected(&self) -> u32 {
self.device().selected
}
pub fn read_device_word(&self) -> u32 {
let inner = self.device();
match inner.selected {
0 => inner.bits as u32,
1 => (inner.bits >> 32) as u32,
_ => 0,
}
}
}

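FeatureBits models the PCI transport's 32-bit select/value window over a 64-bit feature mask. A sketch of the write sequence a driver performs, using only the accessors above:

fn negotiate_demo() {
    let features = FeatureBits::new_default(0);
    features.set_guest_selected(0);
    features.write_guest_word(0x1);   // low dword: feature bit 0
    features.set_guest_selected(1);
    features.write_guest_word(0x1);   // high dword: feature bit 32 (VERSION_1)
    assert_eq!(features.guest_value(), (1u64 << 32) | 1);
    assert!(features.has_guest_bit(1 << 32));
}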
41
src/io/virtio/mod.rs Normal file

@ -0,0 +1,41 @@
mod device;
mod consts;
mod vq;
mod queues;
mod features;
use std::result;
pub use device::{VirtioDeviceState, VirtioDevice, DeviceConfigArea};
pub use queues::Queues;
pub use features::FeatureBits;
pub use consts::VirtioDeviceType;
pub use vq::virtqueue::VirtQueue;
pub use vq::chain::Chain;
use crate::io::bus::Error as BusError;
use thiserror::Error;
use vmm_sys_util::errno;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug,Error)]
pub enum Error {
#[error("failed to create EventFd for VirtQueue: {0}")]
CreateEventFd(std::io::Error),
#[error("failed to create IoEventFd for VirtQueue: {0}")]
CreateIoEventFd(kvm_ioctls::Error),
#[error("failed to read from IoEventFd: {0}")]
ReadIoEventFd(std::io::Error),
#[error("VirtQueue not enabled")]
QueueNotEnabled,
#[error("VirtQueue descriptor table range is invalid 0x{0:x}")]
RangeInvalid(u64),
#[error("VirtQueue avail ring range range is invalid 0x{0:x}")]
AvailInvalid(u64),
#[error("VirtQueue used ring range is invalid 0x{0:x}")]
UsedInvalid(u64),
#[error("{0}")]
BusInsert(#[from]BusError),
#[error("Error registering irqfd: {0}")]
IrqFd(errno::Error),
}

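The #[from] attribute on BusInsert derives a From<BusError> impl, so the ? operator converts bus errors implicitly. A small sketch (attach_device and its argument are invented for illustration):

fn attach_device(r: std::result::Result<(), BusError>) -> Result<()> {
    r?;   // BusError converts into Error::BusInsert via the derived From
    Ok(())
}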
247
src/io/virtio/queues.rs Normal file

@ -0,0 +1,247 @@
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use kvm_ioctls::{IoEventAddress, NoDatamatch};
use vmm_sys_util::eventfd::EventFd;
use crate::memory::MemoryManager;
use crate::io::virtio::{Error, Result};
use crate::io::virtio::consts::VIRTIO_MMIO_OFFSET_NOTIFY;
use crate::io::VirtQueue;
use crate::vm::KvmVm;
pub struct InterruptLine {
irqfd: EventFd,
irq: u8,
isr: AtomicUsize,
}
impl InterruptLine {
fn new(kvm_vm: &KvmVm, irq: u8) -> Result<InterruptLine> {
let irqfd = EventFd::new(0)
.map_err(Error::CreateEventFd)?;
kvm_vm.vm_fd().register_irqfd(&irqfd, irq as u32)
.map_err(Error::IrqFd)?;
Ok(InterruptLine{
irqfd,
irq,
isr: AtomicUsize::new(0)
})
}
fn irq(&self) -> u8 {
self.irq
}
fn isr_read(&self) -> u64 {
self.isr.swap(0, Ordering::SeqCst) as u64
}
pub fn notify_queue(&self) {
self.isr.fetch_or(0x1, Ordering::SeqCst);
self.irqfd.write(1).unwrap();
}
pub fn notify_config(&self) {
self.isr.fetch_or(0x2, Ordering::SeqCst);
self.irqfd.write(1).unwrap();
}
}
pub struct Queues {
memory: MemoryManager,
selected_queue: u16,
queues: Vec<VirtQueue>,
interrupt: Arc<InterruptLine>,
}
impl Queues {
pub fn new(memory: MemoryManager, irq: u8) -> Result<Self> {
let interrupt = InterruptLine::new(memory.kvm_vm(), irq)?;
let queues = Queues {
memory,
selected_queue: 0,
queues: Vec::new(),
interrupt: Arc::new(interrupt),
};
Ok(queues)
}
    pub fn get_queue(&self, idx: usize) -> VirtQueue {
        self.queues
            .get(idx)
            .cloned()
            .unwrap_or_else(|| panic!("Virtio device requested VQ index {} that does not exist", idx))
    }
pub fn queues(&self) -> Vec<VirtQueue> {
self.queues.clone()
}
pub fn memory(&self) -> &MemoryManager {
&self.memory
}
pub fn configure_queues(&self, features: u64) -> Result<()> {
for q in &self.queues {
q.configure(features)?;
}
Ok(())
}
pub fn reset(&mut self) {
self.selected_queue = 0;
let _ = self.isr_read();
for vr in &mut self.queues {
vr.reset();
}
}
pub fn irq(&self) -> u8 {
self.interrupt.irq()
}
pub fn isr_read(&self) -> u64 {
self.interrupt.isr_read()
}
pub fn num_queues(&self) -> u16 {
self.queues.len() as u16
}
    pub fn create_queues(&mut self, mmio_base: u64, queue_sizes: &[u16]) -> Result<()> {
        for (idx, &sz) in queue_sizes.iter().enumerate() {
            let ioevent = self.create_ioevent(idx, mmio_base)?;
            let vq = VirtQueue::new(self.memory.guest_ram().clone(), sz, self.interrupt.clone(), ioevent);
            self.queues.push(vq);
        }
        Ok(())
    }
fn create_ioevent(&self, index: usize, mmio_base: u64) -> Result<Arc<EventFd>> {
let evt = EventFd::new(0)
.map_err(Error::CreateEventFd)?;
let notify_address = mmio_base +
VIRTIO_MMIO_OFFSET_NOTIFY +
(4 * index as u64);
let addr = IoEventAddress::Mmio(notify_address);
self.memory.kvm_vm().vm_fd().register_ioevent(&evt, &addr, NoDatamatch)
.map_err(Error::CreateIoEventFd)?;
Ok(Arc::new(evt))
}
fn current_queue(&self) -> Option<&VirtQueue> {
self.queues.get(self.selected_queue as usize)
}
fn with_current<F>(&mut self, f: F)
where F: FnOnce(&mut VirtQueue)
{
if let Some(vq) = self.queues.get_mut(self.selected_queue as usize) {
if !vq.is_enabled() {
f(vq)
}
}
}
pub fn selected_queue(&self) -> u16 {
self.selected_queue
}
pub fn select(&mut self, index: u16) {
self.selected_queue = index;
}
pub fn is_current_enabled(&self) -> bool {
self.current_queue()
.map(|q| q.is_enabled())
.unwrap_or(false)
}
pub fn queue_size(&self) -> u16 {
self.current_queue()
.map(|q| q.size())
.unwrap_or(0)
}
pub fn set_size(&mut self, size: u16) {
self.with_current(|q| q.set_size(size))
}
pub fn enable_current(&mut self) {
self.with_current(|q| q.enable())
}
pub fn get_current_descriptor_area(&self, hi_word: bool) -> u32 {
self.current_queue().map(|q| if hi_word {
Self::get_hi32(q.descriptor_area())
} else {
Self::get_lo32(q.descriptor_area())
}).unwrap_or(0)
}
pub fn set_current_descriptor_area(&mut self, val: u32, hi_word: bool) {
self.with_current(|q| {
let mut addr = q.descriptor_area();
if hi_word { Self::set_hi32(&mut addr, val) } else { Self::set_lo32(&mut addr, val) }
q.set_descriptor_area(addr);
});
}
pub fn get_avail_area(&self, hi_word: bool) -> u32 {
self.current_queue().map(|q| if hi_word {
Self::get_hi32(q.driver_area())
} else {
Self::get_lo32(q.driver_area())
}).unwrap_or(0)
}
fn set_hi32(val: &mut u64, dword: u32) {
const MASK_LO_32: u64 = (1u64 << 32) - 1;
*val = (*val & MASK_LO_32) | (u64::from(dword) << 32)
}
fn set_lo32(val: &mut u64, dword: u32) {
const MASK_HI_32: u64 = ((1u64 << 32) - 1) << 32;
*val = (*val & MASK_HI_32) | u64::from(dword)
}
fn get_hi32(val: u64) -> u32 {
(val >> 32) as u32
}
fn get_lo32(val: u64) -> u32 {
val as u32
}
pub fn set_avail_area(&mut self, val: u32, hi_word: bool) {
self.with_current(|q| {
let mut addr = q.driver_area();
if hi_word { Self::set_hi32(&mut addr, val) } else { Self::set_lo32(&mut addr, val) }
q.set_driver_area(addr);
});
}
pub fn set_used_area(&mut self, val: u32, hi_word: bool) {
self.with_current(|q| {
let mut addr = q.device_area();
if hi_word { Self::set_hi32(&mut addr, val) } else { Self::set_lo32(&mut addr, val) }
q.set_device_area(addr);
});
}
pub fn get_used_area(&self, hi_word: bool) -> u32 {
self.current_queue().map(|q| if hi_word {
Self::get_hi32(q.device_area())
} else {
Self::get_lo32(q.device_area())
}).unwrap_or(0)
}
}

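The get/set pairs for the descriptor, avail and used areas exist because the guest programs each 64-bit ring address one dword at a time. A quick worked check of the same arithmetic as set_lo32/set_hi32 above:

fn dword_assembly_demo() {
    let (lo, hi) = (0x2345_6000u32, 0x1u32);
    let mut addr = 0u64;
    addr = (addr & !0xffff_ffffu64) | u64::from(lo);         // lo write at offset 32
    addr = (addr & 0xffff_ffffu64) | (u64::from(hi) << 32);  // hi write at offset 36
    assert_eq!(addr, 0x1_2345_6000);
}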

@ -1,11 +1,11 @@
-use std::fmt;
-use std::io::{self,Read,Write};
+use std::{fmt, io};
+use std::io::{Read, Write};
+use std::sync::{Arc, Mutex};
+use crate::io::virtio::vq::descriptor::Descriptor;
+use crate::io::virtio::vq::virtqueue::QueueBackend;
 use crate::memory::GuestRam;
-use crate::virtio::VirtQueue;
-use crate::virtio::vring::Descriptor;
-struct DescriptorList {
+pub struct DescriptorList {
     memory: GuestRam,
     descriptors: Vec<Descriptor>,
     offset: usize,
@ -14,7 +14,7 @@ struct DescriptorList {
 }
 impl DescriptorList {
-    fn new(memory: GuestRam) -> Self {
+    pub fn new(memory: GuestRam) -> Self {
         DescriptorList {
             memory,
             descriptors: Vec::new(),
@ -24,12 +24,12 @@ impl DescriptorList {
         }
     }
-    fn add_descriptor(&mut self, d: Descriptor) {
-        self.total_size += d.len as usize;
+    pub fn add_descriptor(&mut self, d: Descriptor) {
+        self.total_size += d.length();
         self.descriptors.push(d)
     }
-    fn reverse(&mut self) {
+    pub fn reverse(&mut self) {
         self.descriptors.reverse();
     }
@ -38,7 +38,7 @@ impl DescriptorList {
         self.offset = 0;
     }
-    fn is_empty(&self) -> bool {
+    pub fn is_empty(&self) -> bool {
         self.descriptors.is_empty()
     }
@ -49,7 +49,7 @@ impl DescriptorList {
     fn current_address(&self, size: usize) -> Option<u64> {
         self.current().and_then(|d| {
             if d.remaining(self.offset) >= size {
-                Some(d.addr + self.offset as u64)
+                Some(d.address() + self.offset as u64)
             } else {
                 None
             }
@ -111,7 +111,7 @@ impl DescriptorList {
     fn current_slice(&self) -> &[u8] {
         if let Some(d) = self.current() {
             let size = d.remaining(self.offset);
-            let addr = d.addr + self.offset as u64;
+            let addr = d.address() + self.offset as u64;
             self.memory.slice(addr, size).unwrap_or(&[])
         } else {
             &[]
@ -121,7 +121,7 @@ impl DescriptorList {
     fn current_mut_slice(&self) -> &mut [u8] {
         if let Some(d) = self.current() {
             let size = d.remaining(self.offset);
-            let addr = d.addr + self.offset as u64;
+            let addr = d.address() + self.offset as u64;
             self.memory.mut_slice(addr, size).unwrap_or(&mut [])
         } else {
             &mut []
@ -135,64 +135,31 @@ impl DescriptorList {
 impl fmt::Debug for DescriptorList {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "DList[size={}, [", self.total_size)?;
+        write!(f, "[size={}, [", self.total_size)?;
         for d in self.descriptors.iter().rev() {
-            write!(f, "(#{}, 0x{:08x}, [{}]),", d.idx, d.addr, d.len)?;
+            write!(f, "(0x{:08x}, [{}]),", d.address(), d.length())?;
         }
-        write!(f, "]")
+        write!(f, "] ]")
     }
 }
 pub struct Chain {
+    backend: Arc<Mutex<dyn QueueBackend>>,
     head: Option<u16>,
-    vq: VirtQueue,
     readable: DescriptorList,
     writeable: DescriptorList,
 }
 impl Chain {
-    pub fn new(memory: GuestRam, vq: VirtQueue, head: u16, ttl: u16) -> Self {
-        let (readable,writeable) = Self::load_descriptors(memory, &vq, head, ttl);
+    pub fn new(backend: Arc<Mutex<dyn QueueBackend>>, head: u16, readable: DescriptorList, writeable: DescriptorList) -> Self {
         Chain {
+            backend,
             head: Some(head),
-            vq,
             readable,
             writeable,
         }
     }
-    fn load_descriptors(memory: GuestRam, vq: &VirtQueue, head: u16, ttl: u16) -> (DescriptorList, DescriptorList) {
-        let mut readable = DescriptorList::new(memory.clone());
-        let mut writeable = DescriptorList::new(memory);
-        let mut idx = head;
-        let mut ttl = ttl;
-        while let Some(d) = vq.load_descriptor(idx) {
-            if ttl == 0 {
-                warn!("Descriptor chain length exceeded ttl");
-                break;
-            } else {
-                ttl -= 1;
-            }
-            if d.is_write() {
-                writeable.add_descriptor(d);
-            } else {
-                if !writeable.is_empty() {
-                    warn!("Guest sent readable virtqueue descriptor after writeable descriptor in violation of specification");
-                }
-                readable.add_descriptor(d);
-            }
-            if !d.has_next() {
-                break;
-            }
-            idx = d.next;
-        }
-        readable.reverse();
-        writeable.reverse();
-        return (readable, writeable);
-    }
     pub fn w8(&mut self, n: u8) -> io::Result<()> {
         self.write_all(&[n])?;
         Ok(())
@ -205,6 +172,7 @@ impl Chain {
         self.write_all(&n.to_le_bytes())?;
         Ok(())
     }
+
     pub fn w64(&mut self, n: u64) -> io::Result<()> {
         self.write_all(&n.to_le_bytes())?;
         Ok(())
@ -231,7 +199,8 @@ impl Chain {
         if let Some(head) = self.head.take() {
             self.readable.clear();
             self.writeable.clear();
-            self.vq.put_used(head, self.writeable.consumed_size as u32);
+            let backend = self.backend.lock().unwrap();
+            backend.put_used(head, self.writeable.consumed_size as u32);
         }
     }
@ -275,7 +244,7 @@ impl Chain {
     }
     pub fn copy_from_reader<R>(&mut self, r: R, size: usize) -> io::Result<usize>
         where R: Read+Sized
     {
         self.writeable.write_from_reader(r, size)
     }

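Devices consume a Chain as a byte stream: Read pulls from the readable descriptors and Write pushes into the writeable ones. A sketch of a request handler under that assumption (the 16-byte header, the zero status byte, and the flush_chain() completion call are all illustrative):

use std::io::{self, Read};

fn handle_request(mut chain: Chain) -> io::Result<()> {
    let mut header = [0u8; 16];
    chain.read_exact(&mut header)?; // consume the request header from the readable side
    chain.w8(0)?;                   // write a status byte into the writeable side
    chain.flush_chain();            // assumed completion method; returns the chain to the used ring
    Ok(())
}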

@ -0,0 +1,111 @@
use std::{cmp, io};
use std::io::Read;
use crate::memory::GuestRam;
#[repr(u16)]
enum DescriptorFlag {
Next = 1,
Write = 2,
Indirect = 4,
PackedAvail = 1<<7,
PackedUsed = 1<<15,
}
#[derive(Copy,Clone)]
pub struct Descriptor {
address: u64,
length: u32,
flags: u16,
// 'next' field for split virtqueue, 'buffer_id' for packed virtqueue
extra: u16,
}
impl Descriptor {
pub fn new(address: u64, length: u32, flags: u16, extra: u16) -> Self {
Descriptor {
address, length, flags, extra
}
}
pub fn length(&self) -> usize {
self.length as usize
}
pub fn address(&self) -> u64 {
self.address
}
///
/// Test if `flag` is set in `self.flags`
///
fn has_flag(&self, flag: DescriptorFlag) -> bool {
self.flags & (flag as u16) != 0
}
///
/// Is VRING_DESC_F_NEXT set in `self.flags`?
///
pub fn has_next(&self) -> bool {
self.has_flag(DescriptorFlag::Next)
}
pub fn next(&self) -> u16 {
self.extra
}
///
/// Is VRING_DESC_F_WRITE set in `self.flags`?
///
pub fn is_write(&self) -> bool {
self.has_flag(DescriptorFlag::Write)
}
///
/// Is VRING_DESC_F_INDIRECT set in `self.flags`?
///
pub fn is_indirect(&self) -> bool {
self.has_flag(DescriptorFlag::Indirect)
}
pub fn remaining(&self, offset: usize) -> usize {
if offset >= self.length as usize {
0
} else {
self.length as usize - offset
}
}
pub fn is_desc_avail(&self, wrap_counter: bool) -> bool {
let used = self.has_flag(DescriptorFlag::PackedUsed);
let avail = self.has_flag(DescriptorFlag::PackedAvail);
(used != avail) && (avail == wrap_counter)
}
pub fn read_from(&self, memory: &GuestRam, offset: usize, buf: &mut[u8]) -> usize {
let sz = cmp::min(buf.len(), self.remaining(offset));
if sz > 0 {
memory.read_bytes(self.address + offset as u64, &mut buf[..sz]).unwrap();
}
sz
}
pub fn write_to(&self, memory: &GuestRam, offset: usize, buf: &[u8]) -> usize {
let sz = cmp::min(buf.len(), self.remaining(offset));
if sz > 0 {
memory.write_bytes(self.address + offset as u64, &buf[..sz]).unwrap();
}
sz
}
pub fn write_from_reader<R: Read+Sized>(&self, memory: &GuestRam, offset: usize, mut r: R, size: usize) -> io::Result<usize> {
let sz = cmp::min(size, self.remaining(offset));
if sz > 0 {
let slice = memory.mut_slice(self.address + offset as u64, sz).unwrap();
return r.read(slice);
}
Ok(0)
}
}

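The PackedAvail/PackedUsed pair encodes availability relative to the driver's wrap counter: a descriptor is available when the two flags differ and PackedAvail matches the counter. A quick check using the constructor above:

fn packed_avail_demo() {
    let avail_only = 1u16 << 7;                  // PackedAvail set, PackedUsed clear
    let d = Descriptor::new(0x1000, 64, avail_only, 0);
    assert!(d.is_desc_avail(true));    // matches wrap counter: available
    assert!(!d.is_desc_avail(false));  // stale wrap counter: not yet available
}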
28
src/io/virtio/vq/mod.rs Normal file

@ -0,0 +1,28 @@
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
pub mod chain;
mod descriptor;
mod splitqueue;
pub mod virtqueue;
///
/// A convenience wrapper around `AtomicUsize`
///
#[derive(Clone)]
pub struct SharedIndex(Arc<AtomicUsize>);
impl SharedIndex {
fn new() -> SharedIndex {
SharedIndex(Arc::new(AtomicUsize::new(0)))
}
fn get(&self) -> u16 {
self.0.load(Ordering::SeqCst) as u16
}
fn inc(&self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
fn set(&self, v: u16) {
self.0.store(v as usize, Ordering::SeqCst);
}
}

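The indices stored here are free-running u16 counters: they grow without bound in the underlying usize, and the truncating cast in get() provides the mod-2^16 wrap. A sketch (SharedIndex's methods are private to the vq module, shown here as if accessible):

fn shared_index_wrap_demo() {
    let idx = SharedIndex::new();
    idx.set(0xffff);
    idx.inc();                 // the usize grows to 0x10000 ...
    assert_eq!(idx.get(), 0);  // ... but the u16 view wraps to 0
}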

@ -0,0 +1,278 @@
use std::sync::{Arc, atomic};
use std::sync::atomic::Ordering;
use crate::io::virtio::Error;
use crate::io::virtio::features::ReservedFeatureBit;
use crate::io::virtio::queues::InterruptLine;
use crate::io::virtio::vq::chain::DescriptorList;
use crate::io::virtio::vq::descriptor::Descriptor;
use crate::io::virtio::vq::SharedIndex;
use crate::io::virtio::vq::virtqueue::QueueBackend;
use crate::memory::GuestRam;
pub struct SplitQueue {
memory: GuestRam,
interrupt: Arc<InterruptLine>,
queue_size: u16,
features: u64,
descriptor_base: u64,
avail_base: u64,
used_base: u64,
/// last seen avail_idx loaded from guest memory
cached_avail_idx: SharedIndex,
/// The index in the avail ring where the next available entry will be read
next_avail: SharedIndex,
/// The index in the used ring where the next used entry will be placed
next_used_idx: SharedIndex,
}
impl SplitQueue {
pub fn new(memory: GuestRam, interrupt: Arc<InterruptLine>) -> Self {
SplitQueue {
memory,
interrupt,
queue_size: 0,
features: 0,
descriptor_base: 0,
avail_base: 0,
used_base: 0,
cached_avail_idx: SharedIndex::new(),
next_avail: SharedIndex::new(),
next_used_idx: SharedIndex::new(),
}
}
///
/// Load the descriptor table entry at `idx` from guest memory and return it.
///
fn load_descriptor(&self, idx: u16) -> Option<Descriptor> {
        if idx >= self.queue_size {
            panic!("load_descriptor called with index {} out of range for queue size {}", idx, self.queue_size);
        }
        let head = self.descriptor_base + (idx as u64 * 16);
        let addr = self.memory.read_int::<u64>(head).unwrap();
        let len = self.memory.read_int::<u32>(head + 8).unwrap();
        let flags = self.memory.read_int::<u16>(head + 12).unwrap();
        let next = self.memory.read_int::<u16>(head + 14).unwrap();
if self.memory.is_valid_range(addr, len as usize) && next < self.queue_size {
return Some(Descriptor::new(addr, len, flags, next));
}
None
}
fn load_descriptor_lists(&self, head: u16) -> (DescriptorList,DescriptorList) {
let mut readable = DescriptorList::new(self.memory.clone());
let mut writeable = DescriptorList::new(self.memory.clone());
let mut idx = head;
let mut ttl = self.queue_size;
while let Some(d) = self.load_descriptor(idx) {
if ttl == 0 {
warn!("Descriptor chain length exceeded ttl");
break;
} else {
ttl -= 1;
}
if d.is_write() {
writeable.add_descriptor(d);
} else {
if !writeable.is_empty() {
warn!("Guest sent readable virtqueue descriptor after writeable descriptor in violation of specification");
}
readable.add_descriptor(d);
}
if !d.has_next() {
break;
}
idx = d.next();
}
readable.reverse();
writeable.reverse();
        (readable, writeable)
}
///
/// Load `avail_ring.idx` from guest memory and store it in `cached_avail_idx`.
///
fn load_avail_idx(&self) -> u16 {
let avail_idx = self.memory.read_int::<u16>(self.avail_base + 2).unwrap();
self.cached_avail_idx.set(avail_idx);
avail_idx
}
///
/// Read from guest memory and return the Avail ring entry at
/// index `ring_idx % queue_size`.
///
fn load_avail_entry(&self, ring_idx: u16) -> u16 {
let offset = (4 + (ring_idx % self.queue_size) * 2) as u64;
self.memory.read_int(self.avail_base + offset).unwrap()
}
    ///
    /// The queue is empty if `next_avail` holds the same value as the
    /// `avail_ring.idx` field in guest memory. If `cached_avail_idx`
    /// currently matches `next_avail`, it is reloaded from guest memory
    /// in case the guest has updated the field since it was last read.
    ///
fn is_empty(&self) -> bool {
let next_avail = self.next_avail.get();
if self.cached_avail_idx.get() != next_avail {
return false;
}
next_avail == self.load_avail_idx()
}
///
/// If queue is not empty, read and return the next Avail ring entry
/// and increment `next_avail`. If queue is empty return `None`
///
fn pop_avail_entry(&self) -> Option<u16> {
if self.is_empty() {
return None
}
let next_avail = self.next_avail.get();
let avail_entry = self.load_avail_entry(next_avail);
self.next_avail.inc();
if self.has_event_idx() {
self.write_avail_event(self.next_avail.get());
}
Some(avail_entry)
}
fn read_avail_flags(&self) -> u16 {
self.memory.read_int::<u16>(self.avail_base).unwrap()
}
///
/// Write an entry into the Used ring.
///
/// The entry is written into the ring structure at offset
/// `next_used_idx % queue_size`. The value of `next_used_idx`
/// is then incremented and the new value is written into
/// guest memory into the `used_ring.idx` field.
///
fn put_used_entry(&self, idx: u16, len: u32) {
if idx >= self.queue_size {
return;
}
let used_idx = (self.next_used_idx.get() % self.queue_size) as u64;
let elem_addr = self.used_base + (4 + used_idx * 8);
// write descriptor index to 'next used' slot in used ring
self.memory.write_int(elem_addr, idx as u32).unwrap();
// write length to 'next used' slot in ring
self.memory.write_int(elem_addr + 4, len as u32).unwrap();
self.next_used_idx.inc();
atomic::fence(Ordering::Release);
// write updated next_used
self.memory.write_int(self.used_base + 2, self.next_used_idx.get()).unwrap();
}
///
/// Write `val` to the `avail_event` field of Used ring.
///
/// If `val` is not a valid index for this virtqueue this
/// function does nothing.
///
pub fn write_avail_event(&self, val: u16) {
if val > self.queue_size {
return;
}
let addr = self.used_base + 4 + (self.queue_size as u64 * 8);
self.memory.write_int::<u16>(addr, val).unwrap();
atomic::fence(Ordering::Release);
}
fn has_event_idx(&self) -> bool {
ReservedFeatureBit::EventIdx.is_set_in(self.features)
}
///
/// Read and return the `used_event` field from the Avail ring
fn read_used_event(&self) -> u16 {
let addr = self.avail_base + 4 + (self.queue_size as u64 * 2);
self.memory.read_int::<u16>(addr).unwrap()
}
fn need_interrupt(&self, first_used: u16) -> bool {
if self.has_event_idx() {
first_used == self.read_used_event()
} else {
self.read_avail_flags() & 0x1 == 0
}
}
}
impl QueueBackend for SplitQueue {
fn configure(&mut self, descriptor_area: u64, driver_area: u64, device_area: u64, size: u16, features: u64) -> crate::io::virtio::Result<()> {
let desc_table_sz = 16 * size as usize;
let avail_ring_sz = 6 + 2 * size as usize;
let used_ring_sz = 6 + 8 * size as usize;
if !self.memory.is_valid_range(descriptor_area, desc_table_sz) {
return Err(Error::RangeInvalid(descriptor_area));
}
if !self.memory.is_valid_range(driver_area, avail_ring_sz) {
return Err(Error::AvailInvalid(driver_area));
}
if !self.memory.is_valid_range(device_area, used_ring_sz) {
return Err(Error::UsedInvalid(device_area));
}
self.descriptor_base = descriptor_area;
self.avail_base = driver_area;
self.used_base = device_area;
self.queue_size = size;
self.features = features;
Ok(())
}
fn reset(&mut self) {
self.queue_size = 0;
self.features = 0;
self.descriptor_base = 0;
self.avail_base = 0;
self.used_base = 0;
self.next_avail.set(0);
self.cached_avail_idx.set(0);
self.next_used_idx.set(0);
}
    ///
    /// The queue is empty if `next_avail` holds the same value as the
    /// `avail_ring.idx` field in guest memory. If `cached_avail_idx`
    /// currently matches `next_avail`, it is reloaded from guest memory
    /// in case the guest has updated the field since it was last read.
    ///
fn is_empty(&self) -> bool {
let next_avail = self.next_avail.get();
if self.cached_avail_idx.get() != next_avail {
return false;
}
next_avail == self.load_avail_idx()
}
fn next_descriptors(&self) -> Option<(u16, DescriptorList, DescriptorList)> {
self.pop_avail_entry().map(|head| {
let (r,w) = self.load_descriptor_lists(head);
(head, r, w)
})
}
fn put_used(&self, id: u16, size: u32) {
let used = self.next_used_idx.get();
self.put_used_entry(id, size);
if self.need_interrupt(used) {
self.interrupt.notify_queue();
}
}
}

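configure() validates the three ring regions against the split-ring layout: 16 bytes per descriptor table entry, a 6-byte avail header plus 2 bytes per entry, and a 6-byte used header plus 8 bytes per entry. For a 256-entry queue that works out as:

fn ring_footprint_demo() {
    let size = 256usize;
    assert_eq!(16 * size, 4096);     // descriptor table
    assert_eq!(6 + 2 * size, 518);   // avail ring (flags, idx, ring[], used_event)
    assert_eq!(6 + 8 * size, 2054);  // used ring (flags, idx, ring[], avail_event)
}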

@ -0,0 +1,195 @@
use std::sync::{Arc, Mutex, MutexGuard};
use vmm_sys_util::eventfd::EventFd;
use crate::io::virtio::{Error, Result};
use crate::io::virtio::consts::MAX_QUEUE_SIZE;
use crate::io::virtio::queues::InterruptLine;
use crate::io::virtio::vq::chain::{Chain, DescriptorList};
use crate::io::virtio::vq::splitqueue::SplitQueue;
use crate::memory::GuestRam;
pub trait QueueBackend: Send {
fn configure(&mut self, descriptor_area: u64, driver_area: u64, device_area: u64, size: u16, features: u64) -> Result<()>;
fn reset(&mut self);
fn is_empty(&self) -> bool;
fn next_descriptors(&self) -> Option<(u16, DescriptorList,DescriptorList)>;
fn put_used(&self, id: u16, size: u32);
}
#[derive(Clone)]
pub struct VirtQueue {
ioeventfd: Arc<EventFd>,
/// Default queue_size for this virtqueue
default_size: u16,
/// Number of elements in the virtqueue ring
queue_size: u16,
descriptor_area: u64,
driver_area: u64,
device_area: u64,
backend: Arc<Mutex<dyn QueueBackend>>,
/// Has this virtqueue been enabled?
enabled: bool,
}
impl VirtQueue {
pub const DEFAULT_QUEUE_SIZE: u16 = 128;
pub fn new(memory: GuestRam, default_size: u16, interrupt: Arc<InterruptLine>, ioeventfd: Arc<EventFd>) -> Self {
let backend = Arc::new(Mutex::new(SplitQueue::new(memory, interrupt)));
VirtQueue {
ioeventfd,
default_size,
queue_size: default_size,
descriptor_area: 0,
driver_area: 0,
device_area: 0,
backend,
enabled: false,
}
}
fn backend(&self) -> MutexGuard<dyn QueueBackend+'static> {
self.backend.lock().unwrap()
}
pub fn descriptor_area(&self) -> u64 {
self.descriptor_area
}
pub fn set_descriptor_area(&mut self, address: u64) {
self.descriptor_area = address;
}
pub fn driver_area(&self) -> u64 {
self.driver_area
}
pub fn set_driver_area(&mut self, address: u64) {
self.driver_area = address;
}
pub fn device_area(&self) -> u64 {
self.device_area
}
pub fn set_device_area(&mut self, address: u64) {
self.device_area = address
}
pub fn is_enabled(&self) -> bool {
self.enabled
}
pub fn enable(&mut self) {
self.enabled = true
}
///
/// Set the queue size of this `VirtQueue`. If `sz` is an invalid value
/// ignore the request. It is illegal to change the queue size after
/// a virtqueue has been enabled, so ignore requests if enabled.
///
/// Valid sizes are less than or equal to `MAX_QUEUE_SIZE` and must
/// be a power of 2.
///
    pub fn set_size(&mut self, sz: u16) {
        // Reject zero up front: `sz & (sz - 1)` would underflow for sz == 0
        if self.is_enabled() || sz == 0 || sz > MAX_QUEUE_SIZE || (sz & (sz - 1)) != 0 {
            return;
        }
        self.queue_size = sz;
    }
pub fn size(&self) -> u16 {
self.queue_size
}
///
/// Reset `VirtQueue` to the initial state. `queue_size` is set to the `default_size`
/// and all other fields are cleared. `enabled` is set to false.
///
pub fn reset(&mut self) {
self.queue_size = self.default_size;
self.descriptor_area = 0;
self.driver_area = 0;
self.device_area = 0;
self.enabled = false;
self.backend().reset();
}
pub fn configure(&self, features: u64) -> Result<()> {
if !self.enabled {
return Err(Error::QueueNotEnabled);
}
self.backend().configure(self.descriptor_area, self.driver_area, self.device_area, self.size(), features)
}
///
/// Does `VirtQueue` currently have available entries?
///
pub fn is_empty(&self) -> bool {
self.backend().is_empty()
}
pub fn wait_ready(&self) -> Result<()> {
if self.is_empty() {
let _ = self.ioeventfd.read()
.map_err(Error::ReadIoEventFd)?;
}
Ok(())
}
pub fn wait_next_chain(&self) -> Result<Chain> {
loop {
self.wait_ready()?;
if let Some(chain) = self.next_chain() {
return Ok(chain)
}
}
}
pub fn next_chain(&self) -> Option<Chain> {
self.backend().next_descriptors().map(|(id, r, w)| {
Chain::new(self.backend.clone(), id, r, w)
})
}
pub fn on_each_chain<F>(&self, mut f: F)
where F: FnMut(Chain) {
loop {
self.wait_ready().unwrap();
for chain in self.iter() {
f(chain);
}
}
}
pub fn iter(&self) -> QueueIter {
QueueIter { vq: self.clone() }
}
pub fn ioevent(&self) -> &EventFd {
&self.ioeventfd
}
}
pub struct QueueIter {
vq: VirtQueue
}
impl Iterator for QueueIter {
type Item = Chain;
fn next(&mut self) -> Option<Self::Item> {
self.vq.next_chain()
}
}

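The setters above mirror the common-config writes handled in device.rs; the driver's order is size, ring addresses, enable, and configure() only succeeds once the queue is enabled. A sketch of that lifecycle with invented guest-physical addresses:

fn bring_up(vq: &mut VirtQueue, features: u64) -> Result<()> {
    vq.set_size(256);                 // power of two, at most MAX_QUEUE_SIZE
    vq.set_descriptor_area(0x10000);  // guest-physical ring addresses
    vq.set_driver_area(0x20000);
    vq.set_device_area(0x30000);
    vq.enable();
    vq.configure(features)            // validates ranges; QueueNotEnabled without enable()
}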

@ -5,6 +5,7 @@ mod socket;
 mod filedesc;
 mod memfd;
 mod tap;
+//pub mod priority;
 pub mod netlink;
 pub use filedesc::{FileDesc, FileFlags};


@ -4,5 +4,5 @@ mod buffer;
 mod log;
 pub use bitvec::BitSet;
-pub use buffer::ByteBuffer;
+pub use buffer::{ByteBuffer,Writeable};
 pub use log::{Logger,LogLevel};


@ -1,161 +0,0 @@
use std::sync::{Arc,RwLock};
use crate::vm::io::IoDispatcher;
use crate::memory::{AddressRange, MemoryManager};
use super::{VirtioDevice,VirtioDeviceOps,PciIrq};
use super::consts::*;
use super::pci::PciBus;
use crate::virtio::Result;
use std::iter;
use crate::vm::KvmVm;
pub struct VirtioBus {
kvm_vm: KvmVm,
memory: MemoryManager,
io_dispatcher: Arc<IoDispatcher>,
pci_bus: Arc<RwLock<PciBus>>,
devices: Vec<Arc<RwLock<VirtioDevice>>>,
}
impl VirtioBus {
pub fn new(memory: MemoryManager, io_dispatcher: Arc<IoDispatcher>, kvm_vm: KvmVm) -> VirtioBus {
VirtioBus {
kvm_vm,
memory,
io_dispatcher: io_dispatcher.clone(),
pci_bus: PciBus::new(&io_dispatcher),
devices: Vec::new(),
}
}
pub fn new_virtio_device(&mut self, device_type: u16, ops: Arc<RwLock<dyn VirtioDeviceOps>>) -> VirtioDeviceConfig {
VirtioDeviceConfig::new(self, device_type, ops)
}
pub fn pci_irqs(&self) -> Vec<PciIrq> {
self.pci_bus.read().unwrap().pci_irqs()
}
}
pub struct VirtioDeviceConfig<'a> {
virtio_bus: &'a mut VirtioBus,
device_type: u16,
irq: u8,
kvm_vm: KvmVm,
ops: Arc<RwLock<dyn VirtioDeviceOps>>,
mmio: AddressRange,
queue_sizes: Vec<usize>,
config_size: usize,
device_class: u16,
features: u64,
}
impl <'a> VirtioDeviceConfig<'a> {
fn new(virtio_bus: &mut VirtioBus, device_type: u16, ops: Arc<RwLock<dyn VirtioDeviceOps>>) -> VirtioDeviceConfig {
let kvm_vm = virtio_bus.kvm_vm.clone();
let mmio = virtio_bus.pci_bus.write().unwrap().allocate_mmio_space(VIRTIO_MMIO_AREA_SIZE);
VirtioDeviceConfig {
virtio_bus,
device_type,
irq: 0,
kvm_vm,
ops,
mmio,
queue_sizes: Vec::new(),
config_size: 0,
features: 0,
device_class: 0x0880,
}
}
pub fn kvm_vm(&self) -> &KvmVm { &self.kvm_vm }
pub fn ops(&self) -> Arc<RwLock<dyn VirtioDeviceOps>> {
self.ops.clone()
}
pub fn irq(&self) -> u8 { self.irq }
pub fn common_cfg_mmio(&self) -> AddressRange {
self.mmio.subrange(VIRTIO_MMIO_OFFSET_COMMON_CFG, VIRTIO_MMIO_COMMON_CFG_SIZE).unwrap()
}
pub fn notify_mmio(&self) -> AddressRange {
self.mmio.subrange(VIRTIO_MMIO_OFFSET_NOTIFY, VIRTIO_MMIO_NOTIFY_SIZE).unwrap()
}
pub fn isr_mmio(&self) -> AddressRange {
self.mmio.subrange(VIRTIO_MMIO_OFFSET_ISR, VIRTIO_MMIO_ISR_SIZE).unwrap()
}
pub fn device_cfg_mmio(&self) -> Option<AddressRange> {
if self.config_size > 0 {
Some(self.mmio.subrange(VIRTIO_MMIO_OFFSET_DEV_CFG, self.config_size).unwrap())
} else {
None
}
}
pub fn feature_bits(&self) -> u64 {
self.features
}
pub fn num_queues(&self) -> usize {
self.queue_sizes.len()
}
pub fn queue_sizes(&self) -> &[usize] {
&self.queue_sizes
}
#[allow(dead_code)]
pub fn config_size(&self) -> usize {
self.config_size
}
pub fn set_queue_sizes(&mut self, sizes: &[usize]) -> &'a mut VirtioDeviceConfig {
self.queue_sizes.clear();
self.queue_sizes.extend_from_slice(sizes);
self
}
pub fn set_num_queues(&mut self, n: usize) -> &'a mut VirtioDeviceConfig {
self.queue_sizes.clear();
self.queue_sizes.extend(iter::repeat(DEFAULT_QUEUE_SIZE as usize).take(n));
self
}
pub fn set_config_size(&mut self, sz: usize) -> &'a mut VirtioDeviceConfig {
self.config_size = sz;
self
}
pub fn set_device_class(&mut self, cl: u16) -> &'a mut VirtioDeviceConfig {
self.device_class = cl;
self
}
pub fn set_features(&mut self, features: u64) -> &'a mut VirtioDeviceConfig {
self.features = features;
self
}
pub fn register(&mut self) -> Result<()> {
self.create_pci_device();
self.features |= VIRTIO_F_VERSION_1;
//self.features |= VIRTIO_F_EVENT_IDX;
let dev = VirtioDevice::new(self.virtio_bus.memory.clone(), &self)?;
self.virtio_bus.io_dispatcher.register_mmio(self.mmio, dev.clone());
self.virtio_bus.devices.push(dev);
Ok(())
}
fn create_pci_device(&mut self) {
let mut pci_bus = self.virtio_bus.pci_bus.write().unwrap();
let mut pci = pci_bus.create_device(PCI_VENDOR_ID_REDHAT, PCI_VIRTIO_DEVICE_ID_BASE + self.device_type, self.device_class);
pci.add_virtio_caps(self.config_size);
pci.set_mmio_bar(VIRTIO_MMIO_BAR, self.mmio);
self.irq = pci.get_irq();
pci_bus.store_device(pci);
}
}


@ -1,135 +0,0 @@
use crate::memory::GuestRam;
use std::sync::Arc;
use kvm_ioctls::{IoEventAddress, NoDatamatch};
use vmm_sys_util::eventfd::EventFd;
use super::VirtQueue;
use super::vring::Vring;
use super::virtqueue::InterruptLine;
use super::bus::VirtioDeviceConfig;
use crate::virtio::{Error, Result};
///
/// Manages a set of virtqueues during device intitialization.
///
pub struct VirtQueueConfig {
num_queues: usize,
selected_queue: u16,
enabled_features: u64,
vrings: Vec<Vring>,
interrupt: Arc<InterruptLine>,
events: Vec<Arc<EventFd>>,
}
impl VirtQueueConfig {
pub fn new(memory: &GuestRam, dev_config: &VirtioDeviceConfig) -> Result<VirtQueueConfig> {
Ok(VirtQueueConfig {
num_queues: dev_config.num_queues(),
selected_queue: 0,
enabled_features: 0,
vrings: create_vrings(memory,dev_config.queue_sizes()),
interrupt: InterruptLine::from_config(&dev_config)?,
events: create_ioeventfds(&dev_config)?,
})
}
pub fn isr_read(&self) -> u64 {
self.interrupt.isr_read()
}
pub fn notify_config(&self) {
self.interrupt.notify_config();
}
pub fn enable_features(&mut self, features: u64) {
self.enabled_features = features;
}
pub fn reset(&mut self) {
self.selected_queue = 0;
let _ = self.interrupt.isr_read();
for vr in &mut self.vrings {
vr.reset();
}
}
pub fn num_queues(&self) -> u16 {
self.num_queues as u16
}
pub fn selected_queue(&self) -> u16 {
self.selected_queue
}
pub fn select_queue(&mut self, q: u16) {
self.selected_queue = q;
}
pub fn with_vring<U,F>(&self, d: U, f: F) -> U
where F: FnOnce(&Vring) -> U
{
match self.vrings.get(self.selected_queue as usize) {
Some(vr) => f(vr),
None => d,
}
}
pub fn with_vring_mut<F>(&mut self, f: F)
where F: FnOnce(&mut Vring)
{
match self.vrings.get_mut(self.selected_queue as usize) {
Some(vr) => if !vr.is_enabled() { f(vr) },
None => (),
}
}
pub fn vring_get_size(&self) -> u16 { self.with_vring(0, |vr| vr.size() ) }
pub fn vring_set_size(&mut self, sz: u16) { self.with_vring_mut(|vr| vr.set_size(sz)) }
pub fn vring_enable(&mut self) { self.with_vring_mut(|vr| vr.enable() ) }
pub fn vring_is_enabled(&self) -> bool { self.with_vring(false, |vr| vr.is_enabled() ) }
pub fn notify(&self, vq: u16) {
match self.events.get(vq as usize) {
Some(ref ev) => ev.write(1).expect("ioeventfd write failed in notify"),
None => (),
}
}
fn create_vq(&self, memory: &GuestRam, idx: usize) -> Result<VirtQueue> {
let vring = self.vrings[idx].clone();
vring.validate()?;
Ok(VirtQueue::new(memory.clone(), vring, self.interrupt.clone(), self.events[idx].clone()))
}
pub fn create_queues(&self, memory: &GuestRam) -> Result<Vec<VirtQueue>> {
let mut v = Vec::with_capacity(self.num_queues);
for i in 0..self.num_queues {
v.push(self.create_vq(memory, i)?);
}
Ok(v)
}
}
fn create_ioeventfds(conf: &VirtioDeviceConfig) -> Result<Vec<Arc<EventFd>>> {
let mut v = Vec::with_capacity(conf.num_queues());
let notify_base = conf.notify_mmio().base();
for i in 0..conf.num_queues() {
let evt = EventFd::new(0)
.map_err(Error::CreateEventFd)?;
let addr = IoEventAddress::Mmio(notify_base + (4 * i as u64));
conf.kvm_vm().vm_fd().register_ioevent(&evt, &addr, NoDatamatch)
.map_err(Error::CreateIoEventFd)?;
v.push(Arc::new(evt));
}
Ok(v)
}
fn create_vrings(memory: &GuestRam, queue_sizes: &[usize]) -> Vec<Vring> {
let mut v = Vec::with_capacity(queue_sizes.len());
for &sz in queue_sizes {
v.push(Vring::new(memory.clone(), sz as u16))
}
v
}


@ -1,120 +0,0 @@
// Maximum number of logical devices on a PCI bus
pub const PCI_MAX_DEVICES: usize = 32;
// IO Port addresses for PCI configuration access
pub const PCI_CONFIG_ADDRESS: u16 = 0xcf8;
pub const PCI_CONFIG_DATA: u16 = 0xcfc;
// Vendor specific PCI capabilities
pub const PCI_CAP_ID_VENDOR: u8 = 0x09;
pub const PCI_CONFIG_SPACE_SIZE: usize = 256;
pub const PCI_CAP_BASE_OFFSET: usize = 0x40;
pub const PCI_VENDOR_ID: usize = 0x00;
pub const PCI_DEVICE_ID: usize = 0x02;
pub const PCI_COMMAND: usize = 0x04;
pub const PCI_COMMAND_IO: u16 = 0x01;
pub const PCI_COMMAND_MEMORY: u16 = 0x02;
pub const PCI_COMMAND_INTX_DISABLE: u16 = 0x400;
pub const PCI_STATUS: usize = 0x06;
pub const PCI_STATUS_CAP_LIST: u16 = 0x10;
pub const PCI_CLASS_REVISION: usize = 0x08;
pub const PCI_CLASS_DEVICE: usize = 0x0a;
pub const PCI_CACHE_LINE_SIZE: usize = 0x0c;
pub const PCI_LATENCY_TIMER: usize = 0x0d;
pub const _PCI_SUBSYSTEM_VENDOR_ID: usize = 0x2c;
pub const PCI_SUBSYSTEM_ID: usize = 0x2e;
pub const PCI_CAPABILITY_LIST: usize = 0x34;
pub const PCI_INTERRUPT_LINE: usize = 0x3C;
pub const PCI_INTERRUPT_PIN: usize = 0x3D;
// Virtio PCI capability types
pub const VIRTIO_PCI_CAP_COMMON_CFG : u8 = 1;
pub const VIRTIO_PCI_CAP_NOTIFY_CFG : u8 = 2;
pub const VIRTIO_PCI_CAP_ISR_CFG : u8 = 3;
pub const VIRTIO_PCI_CAP_DEVICE_CFG : u8 = 4;
// Indicates that no MSIX vector is configured
pub const VIRTIO_NO_MSI_VECTOR: u16 = 0xFFFF;
// Bar number 0 is used for Virtio MMIO area
pub const VIRTIO_MMIO_BAR: usize = 0;
// Virtio MMIO area is one page
pub const VIRTIO_MMIO_AREA_SIZE: usize = 4096;
// Offsets and sizes for each structure in MMIO area
pub const VIRTIO_MMIO_OFFSET_COMMON_CFG : usize = 0; // Common configuration offset
pub const VIRTIO_MMIO_OFFSET_ISR : usize = 56; // ISR register offset
pub const VIRTIO_MMIO_OFFSET_NOTIFY : usize = 0x400; // Notify area offset
pub const VIRTIO_MMIO_OFFSET_DEV_CFG : usize = 0x800; // Device specific configuration offset
pub const VIRTIO_MMIO_COMMON_CFG_SIZE: usize = 56; // Common configuration size
pub const VIRTIO_MMIO_NOTIFY_SIZE : usize = 0x400; // Notify area size
pub const VIRTIO_MMIO_ISR_SIZE : usize = 4; // ISR register size
// Common configuration header offsets
pub const VIRTIO_PCI_COMMON_DFSELECT : usize = 0;
pub const VIRTIO_PCI_COMMON_DF : usize = 4;
pub const VIRTIO_PCI_COMMON_GFSELECT : usize = 8;
pub const VIRTIO_PCI_COMMON_GF : usize = 12;
pub const VIRTIO_PCI_COMMON_MSIX : usize = 16;
pub const VIRTIO_PCI_COMMON_NUMQ : usize = 18;
pub const VIRTIO_PCI_COMMON_STATUS : usize = 20;
pub const VIRTIO_PCI_COMMON_CFGGENERATION : usize = 21;
pub const VIRTIO_PCI_COMMON_Q_SELECT : usize = 22;
pub const VIRTIO_PCI_COMMON_Q_SIZE : usize = 24;
pub const VIRTIO_PCI_COMMON_Q_MSIX : usize = 26;
pub const VIRTIO_PCI_COMMON_Q_ENABLE : usize = 28;
pub const VIRTIO_PCI_COMMON_Q_NOFF : usize = 30;
pub const VIRTIO_PCI_COMMON_Q_DESCLO : usize = 32;
pub const VIRTIO_PCI_COMMON_Q_DESCHI : usize = 36;
pub const VIRTIO_PCI_COMMON_Q_AVAILLO : usize = 40;
pub const VIRTIO_PCI_COMMON_Q_AVAILHI : usize = 44;
pub const VIRTIO_PCI_COMMON_Q_USEDLO : usize = 48;
pub const VIRTIO_PCI_COMMON_Q_USEDHI : usize = 52;
// Common configuration status bits
pub const _VIRTIO_CONFIG_S_ACKNOWLEDGE : u8 = 1;
pub const _VIRTIO_CONFIG_S_DRIVER : u8 = 2;
pub const VIRTIO_CONFIG_S_DRIVER_OK : u8 = 4;
pub const VIRTIO_CONFIG_S_FEATURES_OK : u8 = 8;
pub const VIRTIO_CONFIG_S_NEEDS_RESET : u8 = 0x40;
pub const _VIRTIO_CONFIG_S_FAILED : u8 = 0x80;
pub const _VRING_USED_F_NO_NOTIFY: u16 = 1;
pub const _VRING_AVAIL_F_NO_INTERRUPT: u16 = 1;
pub const _VIRTIO_F_INDIRECT_DESC: u64 = 1 << 28;
pub const VIRTIO_F_EVENT_IDX: u64 = 1 << 29;
pub const VIRTIO_F_VERSION_1: u64 = 1 << 32;
pub const VRING_DESC_F_NEXT: u16 = 1;
pub const VRING_DESC_F_WRITE: u16 = 2;
pub const VRING_DESC_F_INDIRECT: u16 = 4;
pub const DEFAULT_QUEUE_SIZE: u16 = 128;
pub const MAX_QUEUE_SIZE: u16 = 1024;
// PCI Vendor id for Virtio devices
pub const PCI_VENDOR_ID_REDHAT: u16 = 0x1af4;
// Base PCI device id for Virtio devices
pub const PCI_VIRTIO_DEVICE_ID_BASE: u16 = 0x1040;
pub const PCI_VENDOR_ID_INTEL: u16 = 0x8086;
pub const PCI_CLASS_BRIDGE_HOST: u16 = 0x0600;


@ -1,228 +0,0 @@
use std::sync::{Arc,RwLock};
use std::ops::DerefMut;
use crate::memory::{AddressRange, MemoryManager};
use super::bus::VirtioDeviceConfig;
use super::VirtQueue;
use super::config::VirtQueueConfig;
use super::consts::*;
use crate::vm::io::MmioOps;
use crate::virtio::Result;
pub trait VirtioDeviceOps: Send+Sync {
fn reset(&mut self) {}
fn enable_features(&mut self, bits: u64) -> bool { let _ = bits; true }
fn write_config(&mut self, offset: usize, size: usize, val: u64) { let (_,_,_) = (offset, size, val); }
fn read_config(&mut self, offset: usize, size: usize) -> u64 { let (_,_) = (offset, size); 0 }
fn start(&mut self, memory: &MemoryManager, queues: Vec<VirtQueue>);
}
pub struct VirtioDevice {
memory: MemoryManager,
vq_config: VirtQueueConfig,
common_cfg_mmio: AddressRange,
isr_mmio: AddressRange,
notify_mmio: AddressRange,
device_cfg_mmio: Option<AddressRange>,
device_ops: Arc<RwLock<dyn VirtioDeviceOps>>,
dfselect: u32,
gfselect: u32,
device_features: u64,
guest_features: u64,
status: u8,
}
const MASK_LOW_32: u64 = (1u64 << 32) - 1;
const MASK_HI_32: u64 = MASK_LOW_32 << 32;
fn set_lo32(val: &mut u64, low32: u32) { *val = (*val & MASK_HI_32) | (low32 as u64) }
fn set_hi32(val: &mut u64, hi32: u32) { *val = ((hi32 as u64) << 32) | (*val & MASK_LOW_32) }
fn get_lo32(val: u64) -> u32 { val as u32 }
fn get_hi32(val: u64) -> u32 { (val >> 32) as u32 }
impl VirtioDevice {
pub fn new(memory: MemoryManager, config: &VirtioDeviceConfig) -> Result<Arc<RwLock<VirtioDevice>>> {
Ok(Arc::new(RwLock::new(VirtioDevice {
memory: memory.clone(),
vq_config: VirtQueueConfig::new(memory.guest_ram(),&config)?,
common_cfg_mmio: config.common_cfg_mmio(),
isr_mmio: config.isr_mmio(),
notify_mmio: config.notify_mmio(),
device_cfg_mmio: config.device_cfg_mmio(),
device_ops: config.ops(),
dfselect: 0,
gfselect: 0,
device_features: config.feature_bits(),
guest_features: 0,
status: 0,
})))
}
fn reset(&mut self) {
self.dfselect = 0;
self.gfselect = 0;
self.guest_features = 0;
self.status = 0;
self.vq_config.reset();
}
fn status_write(&mut self, val: u8) {
// 4.1.4.3.1 The device MUST reset when 0 is written to device status
if val == 0 {
self.reset();
return;
}
// 2.1.1 The driver MUST NOT clear a device status bit
if self.status & !val != 0 {
return;
}
let new_bits = val & !self.status;
if new_bits & VIRTIO_CONFIG_S_DRIVER_OK != 0 {
match self.vq_config.create_queues(self.memory.guest_ram()) {
Ok(queues) => self.with_ops(|ops| ops.start(&self.memory, queues)),
Err(e) => {
println!("creating virtqueues failed {}", e);
self.status |= VIRTIO_CONFIG_S_NEEDS_RESET;
self.vq_config.notify_config();
return;
}
}
}
if new_bits & VIRTIO_CONFIG_S_FEATURES_OK != 0 {
if !self.with_ops(|ops| ops.enable_features(self.guest_features)) {
self.vq_config.enable_features(self.guest_features);
return;
}
}
self.status |= new_bits;
}
fn common_config_write(&mut self, offset: usize, _size: usize, val: u32) {
match offset {
VIRTIO_PCI_COMMON_DFSELECT => self.dfselect = val,
VIRTIO_PCI_COMMON_GFSELECT => self.gfselect = val,
VIRTIO_PCI_COMMON_GF => {
match self.gfselect {
0 => set_lo32(&mut self.guest_features, val),
1 => set_hi32(&mut self.guest_features, val),
_ => {},
}
// 2.2.1
// The driver MUST NOT accept a feature which the device did
// not offer.
self.guest_features &= self.device_features;
},
VIRTIO_PCI_COMMON_STATUS => self.status_write(val as u8),
VIRTIO_PCI_COMMON_Q_SELECT=> self.vq_config.select_queue(val as u16),
VIRTIO_PCI_COMMON_Q_SIZE => self.vq_config.vring_set_size(val as u16),
VIRTIO_PCI_COMMON_Q_ENABLE=> if val == 1 { self.vq_config.vring_enable() } ,
VIRTIO_PCI_COMMON_Q_DESCLO=> self.vq_config.with_vring_mut(|vr| set_lo32(&mut vr.descriptors, val)),
VIRTIO_PCI_COMMON_Q_DESCHI=> self.vq_config.with_vring_mut(|vr| set_hi32(&mut vr.descriptors, val)),
VIRTIO_PCI_COMMON_Q_AVAILLO=> self.vq_config.with_vring_mut(|vr| set_lo32(&mut vr.avail_ring, val)),
VIRTIO_PCI_COMMON_Q_AVAILHI=> self.vq_config.with_vring_mut(|vr| set_hi32(&mut vr.avail_ring, val)),
VIRTIO_PCI_COMMON_Q_USEDLO=> self.vq_config.with_vring_mut(|vr| set_lo32(&mut vr.used_ring, val)),
VIRTIO_PCI_COMMON_Q_USEDHI=> self.vq_config.with_vring_mut(|vr| set_hi32(&mut vr.used_ring, val)),
_ => {},
}
}
fn common_config_read(&mut self, offset: usize, _size: usize) -> u32 {
match offset {
VIRTIO_PCI_COMMON_DFSELECT => self.dfselect,
VIRTIO_PCI_COMMON_DF=> match self.dfselect {
0 => get_lo32(self.device_features),
1 => get_hi32(self.device_features),
_ => 0,
},
VIRTIO_PCI_COMMON_GFSELECT => { self.gfselect },
VIRTIO_PCI_COMMON_GF => match self.gfselect {
0 => get_lo32(self.guest_features),
1 => get_hi32(self.guest_features),
_ => 0,
},
VIRTIO_PCI_COMMON_MSIX => VIRTIO_NO_MSI_VECTOR as u32,
VIRTIO_PCI_COMMON_NUMQ => self.vq_config.num_queues() as u32,
VIRTIO_PCI_COMMON_STATUS => self.status as u32,
VIRTIO_PCI_COMMON_CFGGENERATION => 0,
VIRTIO_PCI_COMMON_Q_SELECT => self.vq_config.selected_queue() as u32,
VIRTIO_PCI_COMMON_Q_SIZE => self.vq_config.vring_get_size() as u32,
VIRTIO_PCI_COMMON_Q_MSIX => VIRTIO_NO_MSI_VECTOR as u32,
VIRTIO_PCI_COMMON_Q_ENABLE => if self.vq_config.vring_is_enabled() {1} else {0},
VIRTIO_PCI_COMMON_Q_NOFF => self.vq_config.selected_queue() as u32,
VIRTIO_PCI_COMMON_Q_DESCLO => self.vq_config.with_vring(0, |vr| get_lo32(vr.descriptors)),
VIRTIO_PCI_COMMON_Q_DESCHI => self.vq_config.with_vring(0, |vr| get_hi32(vr.descriptors)),
VIRTIO_PCI_COMMON_Q_AVAILLO => self.vq_config.with_vring(0, |vr| get_lo32(vr.avail_ring)),
VIRTIO_PCI_COMMON_Q_AVAILHI => self.vq_config.with_vring(0, |vr| get_hi32(vr.avail_ring)),
VIRTIO_PCI_COMMON_Q_USEDLO => self.vq_config.with_vring(0, |vr| get_lo32(vr.used_ring)),
VIRTIO_PCI_COMMON_Q_USEDHI => self.vq_config.with_vring(0, |vr| get_hi32(vr.used_ring)),
_ => 0,
}
}
fn notify_read(&mut self, _offset: usize, _size: usize) -> u64 {
0
}
fn notify_write(&mut self, offset: usize, _size: usize, _val: u64) {
let vq = (offset / 4) as u16;
self.vq_config.notify(vq);
}
fn isr_read(&mut self) -> u64 {
self.vq_config.isr_read()
}
fn with_ops<U,F>(&self, f: F) -> U
where F: FnOnce(&mut dyn VirtioDeviceOps) -> U {
let mut ops = self.device_ops.write().unwrap();
f(ops.deref_mut())
}
}
impl MmioOps for VirtioDevice {
fn mmio_read(&mut self, address: u64, size: usize) -> u64 {
if self.common_cfg_mmio.contains(address, size) {
let offset = self.common_cfg_mmio.offset_of(address);
self.common_config_read(offset,size) as u64
} else if self.notify_mmio.contains(address, size) {
let offset = self.notify_mmio.offset_of(address);
self.notify_read(offset, size) as u64
} else if self.isr_mmio.contains(address, size) {
self.isr_read()
} else if let Some(ref dev_cfg_mmio) = self.device_cfg_mmio {
// only treat the access as device config space if it actually
// falls inside the device config mmio range
if dev_cfg_mmio.contains(address, size) {
let offset = dev_cfg_mmio.offset_of(address);
self.with_ops(|ops| ops.read_config(offset, size))
} else {
0
}
} else {
0
}
}
fn mmio_write(&mut self, address: u64, size: usize, val: u64) {
if self.common_cfg_mmio.contains(address, size) {
let offset = self.common_cfg_mmio.offset_of(address);
self.common_config_write(offset,size, val as u32)
} else if self.notify_mmio.contains(address, size) {
let offset = self.notify_mmio.offset_of(address);
self.notify_write(offset, size, val)
} else if let Some(ref dev_cfg_mmio) = self.device_cfg_mmio {
if dev_cfg_mmio.contains(address, size) {
let offset = dev_cfg_mmio.offset_of(address);
self.with_ops(|ops| ops.write_config(offset, size, val))
}
}
}
}
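For orientation, this is the driver-side sequence that status_write() above enforces, as a minimal sketch. The bit values follow the virtio spec; the crate's own VIRTIO_CONFIG_S_* constants are defined elsewhere in this diff and are assumed equivalent here.

const ACKNOWLEDGE: u8 = 1;
const DRIVER: u8 = 2;
const DRIVER_OK: u8 = 4;
const FEATURES_OK: u8 = 8;

fn driver_handshake(write_status: &mut dyn FnMut(u8), read_status: &dyn Fn() -> u8) {
    // Each write may only add bits; status_write() returns early on any
    // write that would clear a bit the driver already set.
    write_status(ACKNOWLEDGE);
    write_status(ACKNOWLEDGE | DRIVER);
    // ... feature selection via GFSELECT/GF happens here ...
    write_status(ACKNOWLEDGE | DRIVER | FEATURES_OK);
    // The device may refuse FEATURES_OK; re-read to confirm it stuck.
    assert!(read_status() & FEATURES_OK != 0, "device rejected features");
    // Setting DRIVER_OK is what triggers create_queues() and ops.start().
    write_status(ACKNOWLEDGE | DRIVER | FEATURES_OK | DRIVER_OK);
}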


@ -1,98 +0,0 @@
use byteorder::{ByteOrder,LittleEndian};
use std::ops::Range;
pub struct DeviceConfigArea {
buffer: Vec<u8>,
write_filter: DeviceConfigWriteFilter,
}
#[allow(dead_code)]
impl DeviceConfigArea {
pub fn new(size: usize) -> Self {
DeviceConfigArea{
buffer: vec![0u8; size],
write_filter: DeviceConfigWriteFilter::new(size),
}
}
pub fn read_config(&self, offset: usize, size: usize) -> u64 {
if offset + size > self.buffer.len() {
return 0;
}
match size {
1 => self.buffer[offset] as u64,
2 => LittleEndian::read_u16(&self.buffer[offset..]) as u64,
4 => LittleEndian::read_u32(&self.buffer[offset..]) as u64,
8 => LittleEndian::read_u64(&self.buffer[offset..]),
_ => 0,
}
}
pub fn write_config(&mut self, offset: usize, size: usize, val: u64) {
if self.write_filter.is_writeable(offset, size) {
match size {
1 => self.write_u8(offset, val as u8),
2 => self.write_u16(offset, val as u16),
4 => self.write_u32(offset, val as u32),
8 => self.write_u64(offset, val as u64),
_ => {},
}
}
}
pub fn set_writeable(&mut self, offset: usize, size: usize) {
self.write_filter.set_writable(offset, size)
}
pub fn write_u8(&mut self, offset: usize, val: u8) {
assert!(offset + 1 <= self.buffer.len());
self.buffer[offset] = val;
}
pub fn write_u16(&mut self, offset: usize, val: u16) {
assert!(offset + 2 <= self.buffer.len());
LittleEndian::write_u16(&mut self.buffer[offset..], val);
}
pub fn write_u32(&mut self, offset: usize, val: u32) {
assert!(offset + 4 <= self.buffer.len());
LittleEndian::write_u32(&mut self.buffer[offset..], val);
}
pub fn write_u64(&mut self, offset: usize, val: u64) {
assert!(offset + 8 <= self.buffer.len());
LittleEndian::write_u64(&mut self.buffer[offset..], val);
}
pub fn write_bytes(&mut self, offset: usize, bytes: &[u8]) {
assert!(offset + bytes.len() <= self.buffer.len());
self.buffer[offset..offset + bytes.len()].copy_from_slice(bytes);
}
}
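A usage sketch for DeviceConfigArea, using only the methods shown above: a 16-byte config space where only bytes 8..12 accept guest writes.

fn example_config() -> DeviceConfigArea {
    let mut config = DeviceConfigArea::new(16);
    config.write_u32(0, 0x1234_5678);       // device-side initialization
    config.set_writeable(8, 4);             // guest may update this field
    config.write_config(8, 4, 0xdead_beef); // accepted: inside the writable range
    config.write_config(0, 4, 0);           // ignored: offset 0 is read-only
    assert_eq!(config.read_config(0, 4), 0x1234_5678);
    assert_eq!(config.read_config(8, 4), 0xdead_beef);
    config
}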
struct DeviceConfigWriteFilter {
size: usize,
ranges: Vec<Range<usize>>,
}
impl DeviceConfigWriteFilter {
fn new(size: usize) -> Self {
DeviceConfigWriteFilter { size, ranges: Vec::new() }
}
fn set_writable(&mut self, offset: usize, size: usize) {
let end = offset + size;
self.ranges.push(offset..end);
}
fn is_writeable(&self, offset: usize, size: usize) -> bool {
if offset + size > self.size {
false
} else {
let last = offset + size - 1;
self.ranges.iter().any(|r| r.contains(&offset) && r.contains(&last))
}
}
}


@ -1,56 +0,0 @@
mod bus;
mod chain;
mod config;
mod consts;
mod device;
mod pci;
mod virtqueue;
mod vring;
mod device_config;
pub use self::virtqueue::VirtQueue;
pub use self::pci::PciIrq;
pub use self::bus::VirtioBus;
pub use self::device::{VirtioDevice,VirtioDeviceOps};
pub use self::chain::Chain;
pub use self::device_config::DeviceConfigArea;
use byteorder::{ByteOrder,LittleEndian};
use std::result;
use thiserror::Error;
pub type Result<T> = result::Result<T, Error>;
#[derive(Debug,Error)]
pub enum Error {
#[error("failed to create EventFd for VirtQueue: {0}")]
CreateEventFd(std::io::Error),
#[error("failed to create IoEventFd for VirtQueue: {0}")]
CreateIoEventFd(kvm_ioctls::Error),
#[error("failed to read from IoEventFd: {0}")]
ReadIoEventFd(std::io::Error),
#[error("VirtQueue: {0}")]
IrqFd(kvm_ioctls::Error),
#[error("vring not enabled")]
VringNotEnabled,
#[error("vring descriptor table range is invalid 0x{0:x}")]
VringRangeInvalid(u64),
#[error("vring avail ring range range is invalid 0x{0:x}")]
VringAvailInvalid(u64),
#[error("vring used ring range is invalid 0x{0:x}")]
VringUsedInvalid(u64),
}
pub fn read_config_buffer(config: &[u8], offset: usize, size: usize) -> u64 {
if offset + size > config.len() {
return 0;
}
match size {
1 => config[offset] as u64,
2 => LittleEndian::read_u16(&config[offset..]) as u64,
4 => LittleEndian::read_u32(&config[offset..]) as u64,
8 => LittleEndian::read_u64(&config[offset..]),
_ => 0,
}
}
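A worked example of read_config_buffer()'s little-endian decode and its bounds behavior:

fn example_read() {
    let config = [0x34, 0x12, 0x00, 0x00, 0x78, 0x56, 0x00, 0x00];
    assert_eq!(read_config_buffer(&config, 0, 2), 0x1234); // u16 at offset 0
    assert_eq!(read_config_buffer(&config, 4, 2), 0x5678); // u16 at offset 4
    assert_eq!(read_config_buffer(&config, 6, 4), 0);      // runs past the end: 0
    assert_eq!(read_config_buffer(&config, 0, 3), 0);      // unsupported size: 0
}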


@ -1,436 +0,0 @@
use std::sync::{Arc,RwLock};
use byteorder::{ByteOrder,LittleEndian};
use crate::vm::io::{IoDispatcher,IoPortOps};
use crate::vm::arch::PCI_MMIO_RESERVED_BASE;
use crate::memory::AddressRange;
use super::consts::*;
struct PciConfigAddress(u32);
impl PciConfigAddress {
fn new() -> PciConfigAddress { PciConfigAddress(0) }
fn set(&mut self, n: u32) { self.0 = n }
fn get(&self) -> u32 { self.0 }
fn bus(&self) -> u32 { self.bits(16, 8) }
fn function(&self) -> u32 { self.bits(8, 3) }
fn device(&self) -> usize { self.bits(11, 5) as usize }
fn offset(&self) -> usize { (self.bits(0, 8) & !0x3) as usize }
fn bits(&self, offset: u32, size: u32) -> u32 {
let mask = (1u32 << size) - 1;
(self.0 >> offset) & mask
}
}
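A worked decode of a CONFIG_ADDRESS dword against the bit layout above; the value is illustrative.

fn example_decode() {
    // enable bit 31 | bus in bits 16..24 | device in 11..16 |
    // function in 8..11 | register offset in 0..8
    let mut addr = PciConfigAddress::new();
    addr.set(0x8000_2808);
    assert_eq!(addr.bus(), 0);
    assert_eq!(addr.device(), 5);
    assert_eq!(addr.function(), 0);
    assert_eq!(addr.offset(), 0x08); // low two bits masked off: dword-aligned
}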
pub struct PciIrq {
pci_id: u8,
int_pin: u8,
irq: u8,
}
impl PciIrq {
fn new(pci: &PciDevice) -> PciIrq {
PciIrq {
pci_id: pci.id,
int_pin: 1,
irq: pci.irq,
}
}
pub fn src_bus_irq(&self) -> u8 {
(self.pci_id << 2) | (self.int_pin - 1)
}
pub fn irq_line(&self) -> u8 {
self.irq
}
}
pub struct PciBus {
devices: Vec<Option<PciDevice>>,
mmio_next_alloc: u32,
next_irq: u8,
next_dev: u8,
config_address: PciConfigAddress,
}
impl PciBus {
pub fn new(io: &IoDispatcher) -> Arc<RwLock<PciBus>> {
let bus = Arc::new(RwLock::new(PciBus {
devices: PciBus::create_device_vec(PCI_MAX_DEVICES),
mmio_next_alloc: PCI_MMIO_RESERVED_BASE as u32,
next_irq: 5,
next_dev: 1,
config_address: PciConfigAddress::new(),
}));
io.register_ioports(PCI_CONFIG_ADDRESS, 8, bus.clone());
let pci = PciDevice::new(0, 0, PCI_VENDOR_ID_INTEL, 0, PCI_CLASS_BRIDGE_HOST);
bus.write().unwrap().store_device(pci);
bus
}
pub fn pci_irqs(&self) -> Vec<PciIrq> {
let mut v = Vec::new();
for d in &self.devices {
match *d {
Some(ref dev) => v.push(PciIrq::new(dev)),
None => (),
}
}
v
}
fn allocate_irq(&mut self) -> u8 {
let irq = self.next_irq;
self.next_irq += 1;
irq
}
fn allocate_id(&mut self) -> u8 {
let id = self.next_dev;
self.next_dev += 1;
id
}
pub fn create_device(&mut self, vendor: u16, device: u16, class_id: u16) -> PciDevice {
let irq = self.allocate_irq();
let id = self.allocate_id();
let pci = PciDevice::new(id, irq, vendor, device, class_id);
pci
}
pub fn store_device(&mut self, pci: PciDevice) {
let id = pci.id as usize;
self.devices[id] = Some(pci)
}
fn create_device_vec(sz: usize) -> Vec<Option<PciDevice>> {
let mut v = Vec::with_capacity(sz);
for _ in 0..sz {
v.push(None)
}
v
}
pub fn allocate_mmio_space(&mut self, sz: usize) -> AddressRange {
let mask = (sz - 1) as u32;
let aligned = (self.mmio_next_alloc + mask) & !mask;
self.mmio_next_alloc = aligned + (sz as u32);
AddressRange::new(aligned as u64, sz)
}
fn is_in_range(base: u16, port: u16, len: usize) -> bool {
let end = port + len as u16;
port >= base && end <= (base + 4)
}
fn is_config_address(&self, port: u16, len: usize) -> bool {
PciBus::is_in_range(PCI_CONFIG_ADDRESS, port, len)
}
fn is_config_data(&self, port: u16, len: usize) -> bool {
PciBus::is_in_range(PCI_CONFIG_DATA, port, len)
}
fn config_address_in(&self, _: usize) -> u32 {
self.config_address.get()
}
fn current_config_device(&mut self) -> Option<&mut PciDevice> {
let b = self.config_address.bus();
let d = self.config_address.device();
let f = self.config_address.function();
if b != 0 || f != 0 || d >= self.devices.len() {
return None;
}
self.devices[d].as_mut()
}
fn config_address_out(&mut self, _offset: u16, size: usize, data: u32) {
if size == 4 {
self.config_address.set(data);
}
}
#[allow(dead_code)]
fn valid_config_access(&self, offset: u16, len: usize) -> bool {
(offset as usize) + len <= 4
}
fn config_data_in(&mut self, offset: usize, size: usize) -> u32 {
let off = self.config_address.offset() + offset;
match self.current_config_device() {
Some(dev) => { dev.read_config(off, size)},
None => 0xFFFFFFFF,
}
}
fn config_data_out(&mut self, offset: u16, size: usize, data: u32) {
let off = self.config_address.offset() + offset as usize;
if let Some(dev) = self.current_config_device() {
dev.write_config(off, size,data)
}
}
}
impl IoPortOps for PciBus {
fn io_in(&mut self, port: u16, size: usize) -> u32 {
if self.is_config_address(port, size) {
return self.config_address_in(size)
}
if self.is_config_data(port, size) {
return self.config_data_in((port - PCI_CONFIG_DATA) as usize, size)
}
return 0;
}
fn io_out(&mut self, port: u16, size: usize, val: u32) {
if self.is_config_address(port, size) {
self.config_address_out(port - PCI_CONFIG_ADDRESS,size, val)
}
if self.is_config_data(port, size) {
self.config_data_out(port - PCI_CONFIG_DATA, size, val)
}
}
}
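A sketch of the guest-side protocol this IoPortOps impl serves. It assumes the conventional port numbers PCI_CONFIG_ADDRESS = 0xCF8 and PCI_CONFIG_DATA = 0xCFC; the actual constants live in the consts module, which this diff does not show.

fn read_vendor_id(bus: &mut PciBus, device: u32) -> u16 {
    let addr = 0x8000_0000      // enable bit
        | (device << 11)        // device number
        | 0x00;                 // register 0x00: vendor id
    bus.io_out(0xCF8, 4, addr); // latch CONFIG_ADDRESS
    bus.io_in(0xCFC, 2) as u16  // read the low word of CONFIG_DATA
}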
pub struct PciDevice {
next_cap: usize,
last_cap: usize,
id: u8,
irq: u8,
config_buffer: [u8; PCI_CONFIG_SPACE_SIZE],
bar_write_masks: [u32; 6],
}
impl PciDevice {
pub fn new(id: u8, irq: u8, vendor: u16, device: u16, class_id: u16) -> PciDevice {
let mut d = PciDevice {
next_cap: PCI_CAP_BASE_OFFSET,
last_cap: 0,
id,
irq,
config_buffer: [0; PCI_CONFIG_SPACE_SIZE],
bar_write_masks: [0; 6],
};
d.w16(PCI_VENDOR_ID, vendor);
d.w16(PCI_DEVICE_ID, device);
d.w16(PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
d.w8(PCI_CLASS_REVISION, 1);
d.w16(PCI_CLASS_DEVICE, class_id);
d.w8(PCI_INTERRUPT_PIN, 1);
d.w8(PCI_INTERRUPT_LINE, irq);
d.w16(PCI_SUBSYSTEM_ID, 0x40);
d
}
pub fn get_irq(&self) -> u8 {
self.irq
}
fn is_valid_write(&self, offset: usize, size: usize) -> bool {
if offset + size > PCI_CONFIG_SPACE_SIZE {
return false;
}
// check alignment of write
let mod4 = offset % 4;
match size {
4 if mod4 == 0 => true,
2 if mod4 == 0 || mod4 == 2 => true,
1 => true,
_ => false,
}
}
fn write_bar(&mut self, offset: usize, size: usize, data: u32) {
assert!(is_bar_offset(offset), "not a bar offset in write_bar()");
let bar = offset_to_bar(offset);
let write_mask = self.bar_write_masks[bar];
if write_mask == 0 {
// no writable bits
return;
}
match size {
4 => self.w32(offset, data),
2 => self.w16(offset, data as u16),
1 => self.w8(offset, data as u8),
_ => (),
};
// apply the write mask to the dword containing whatever was written
let dword = offset & !3;
let v = self.r32(dword);
self.w32(dword, v & write_mask);
}
fn write_config(&mut self, offset: usize, size: usize, data: u32) {
if !self.is_valid_write(offset, size) {
return;
}
if is_bar_offset(offset) {
self.write_bar(offset, size, data);
return;
}
match offset {
PCI_COMMAND if size == 2 => self.w16(PCI_COMMAND, data as u16),
PCI_STATUS if size == 2 => self.w16(PCI_STATUS, data as u16),
PCI_CACHE_LINE_SIZE if size == 1 => self.w8(PCI_CACHE_LINE_SIZE, data as u8),
PCI_LATENCY_TIMER if size == 1 => self.w8(PCI_LATENCY_TIMER, data as u8),
_ => (),
}
}
fn w32(&mut self, off: usize, val: u32) { LittleEndian::write_u32(&mut self.config_buffer[off..], val); }
fn w16(&mut self, off: usize, val: u16) { LittleEndian::write_u16(&mut self.config_buffer[off..], val); }
fn w8(&mut self, off: usize, val: u8) { self.config_buffer[off] = val; }
fn r32(&self, off: usize) -> u32 { LittleEndian::read_u32(&self.config_buffer[off..]) }
fn r16(&self, off: usize) -> u16 { LittleEndian::read_u16(&self.config_buffer[off..]) }
fn r8(&self, off: usize) -> u8 { self.config_buffer[off] }
fn read_config(&self, offset: usize, size: usize) -> u32 {
if offset + size > PCI_CONFIG_SPACE_SIZE {
return 0xFFFFFFFF;
}
match size {
1 => self.r8(offset) as u32,
2 => self.r16(offset) as u32,
4 => self.r32(offset),
_ => 0xFFFFFFFF
}
}
#[allow(dead_code)]
pub fn is_irq_disabled(&self) -> bool {
self.r16(PCI_COMMAND) & PCI_COMMAND_INTX_DISABLE != 0
}
pub fn set_mmio_bar(&mut self, bar: usize, range: AddressRange) {
assert!(range.is_naturally_aligned(), "cannot set_mmio_bar() because mmio range is not naturally aligned");
assert!(bar < 5, "bar is invalid value in set_mmio_bar()");
self.bar_write_masks[bar] = !((range.size() as u32) - 1);
self.w32(bar_to_offset(bar), range.base() as u32);
}
pub fn add_virtio_caps(&mut self, config_size: usize) {
self.new_virtio_cap(VIRTIO_PCI_CAP_COMMON_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_COMMON_CFG, VIRTIO_MMIO_COMMON_CFG_SIZE).add(self);
self.new_virtio_cap(VIRTIO_PCI_CAP_ISR_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_ISR, VIRTIO_MMIO_ISR_SIZE).add(self);
self.new_virtio_cap(VIRTIO_PCI_CAP_NOTIFY_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_NOTIFY, VIRTIO_MMIO_NOTIFY_SIZE)
.set_extra_word(4).add(self);
if config_size > 0 {
self.new_virtio_cap(VIRTIO_PCI_CAP_DEVICE_CFG)
.set_mmio_range(VIRTIO_MMIO_OFFSET_DEV_CFG,config_size).add(self);
}
}
pub fn new_virtio_cap(&mut self, vtype: u8) -> VirtioCap {
VirtioCap::new(self.next_cap, vtype)
}
fn inc_cap(&mut self, size: usize) {
let next = self.next_cap as u8;
let last = self.last_cap;
if self.last_cap == 0 {
self.w8(PCI_CAPABILITY_LIST, next);
let status = self.r16(PCI_STATUS) | PCI_STATUS_CAP_LIST;
self.w16(PCI_STATUS, status);
} else {
self.w8(last + 1, next);
}
self.last_cap = self.next_cap;
let aligned = (size + 3) & !3;
self.next_cap += aligned;
}
}
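A sketch of how create_device(), allocate_mmio_space(), set_mmio_bar() and add_virtio_caps() compose; the vendor, device and class ids below are illustrative.

fn add_virtio_function(bus: &mut PciBus) {
    let mut dev = bus.create_device(0x1af4, 0x1041, 0x0100);
    // allocate_mmio_space() rounds the base up to the allocation size,
    // which satisfies the natural-alignment assertion in set_mmio_bar()
    let mmio = bus.allocate_mmio_space(0x2000);
    dev.set_mmio_bar(VIRTIO_MMIO_BAR as usize, mmio);
    dev.add_virtio_caps(64); // expose a 64-byte device config region
    bus.store_device(dev);
}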
fn is_bar_offset(offset: usize) -> bool {
offset >= 0x10 && offset < 0x28
}
fn bar_to_offset(bar: usize) -> usize {
0x10 + (bar * 4)
}
fn offset_to_bar(offset: usize) -> usize {
assert!(offset >= 0x10 && offset < 0x28, "not a valid bar offset");
(offset - 0x10) / 4
}
pub struct VirtioCap {
offset: usize,
vtype: u8,
size: u8,
mmio_offset: u32,
mmio_len: u32,
extra_word: Option<u32>,
}
impl VirtioCap {
fn new(offset: usize, vtype: u8) -> VirtioCap {
VirtioCap {
vtype,
offset,
size: 16,
mmio_offset: 0,
mmio_len: 0,
extra_word: None,
}
}
pub fn set_mmio_range(&mut self, offset: usize, len: usize) -> &mut VirtioCap {
self.mmio_offset = offset as u32;
self.mmio_len = len as u32;
self
}
pub fn set_extra_word(&mut self, val: u32) -> &mut VirtioCap {
self.size += 4;
self.extra_word = Some(val);
self
}
pub fn add(&mut self, dev: &mut PciDevice) {
/*
* struct virtio_pci_cap {
* u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
* u8 cap_next; /* Generic PCI field: next ptr. */
* u8 cap_len; /* Generic PCI field: capability length */
* u8 cfg_type; /* Identifies the structure. */
* u8 bar; /* Where to find it. */
* u8 padding[3]; /* Pad to full dword. */
* le32 offset; /* Offset within bar. */
* le32 length; /* Length of the structure, in bytes. */
* };
*/
dev.w8(self.offset, PCI_CAP_ID_VENDOR);
dev.w8(self.offset + 2, self.size);
dev.w8(self.offset + 3, self.vtype);
dev.w8(self.offset + 4, VIRTIO_MMIO_BAR as u8);
if self.mmio_len > 0 {
dev.w32(self.offset + 8, self.mmio_offset);
dev.w32(self.offset + 12, self.mmio_len);
}
if let Some(word) = self.extra_word {
dev.w32(self.offset + 16, word);
}
dev.inc_cap(self.size as usize);
}
}


@ -1,177 +0,0 @@
use std::sync::atomic::{Ordering, AtomicUsize, AtomicBool};
use std::sync::Arc;
use vmm_sys_util::eventfd::EventFd;
use crate::memory::GuestRam;
use crate::virtio::{Result,Error};
use super::consts::*;
use super::vring::{Vring,Descriptor};
use super::bus::VirtioDeviceConfig;
use crate::virtio::chain::Chain;
use crate::vm::KvmVm;
#[derive(Clone)]
pub struct VirtQueue {
memory: GuestRam,
vring: Vring,
features: u64,
ioeventfd: Arc<EventFd>,
interrupt: Arc<InterruptLine>,
closed: Arc<AtomicBool>,
}
impl VirtQueue {
pub fn new(memory: GuestRam, vring: Vring, interrupt: Arc<InterruptLine>, ioeventfd: Arc<EventFd>) -> VirtQueue {
VirtQueue {
memory,
vring,
features: 0,
ioeventfd,
interrupt,
closed: Arc::new(AtomicBool::new(false)),
}
}
#[allow(dead_code)]
pub fn set_closed(&self) {
self.closed.store(true, Ordering::SeqCst);
self.ioeventfd.write(1).unwrap();
}
#[allow(dead_code)]
pub fn is_closed(&self) -> bool {
self.closed.load(Ordering::SeqCst)
}
fn use_event_idx(&self) -> bool {
self.features & VIRTIO_F_EVENT_IDX != 0
}
pub fn wait_ready(&self) -> Result<()> {
if self.vring.is_empty() {
let _ = self.ioeventfd.read()
.map_err(Error::ReadIoEventFd)?;
}
Ok(())
}
pub fn wait_next_chain(&self) -> Result<Chain> {
loop {
self.wait_ready()?;
if let Some(idx) = self.pop_avail_entry() {
return Ok(Chain::new(self.memory.clone(), self.clone(), idx, self.vring.size()));
}
}
}
pub fn next_chain(&self) -> Option<Chain> {
self.pop_avail_entry()
.map(|idx| Chain::new(self.memory.clone(), self.clone(), idx, self.vring.size()))
}
pub fn on_each_chain<F>(&self, mut f: F)
where F: FnMut(Chain) {
loop {
self.wait_ready().unwrap();
for chain in self.iter() {
f(chain);
}
}
}
pub fn iter(&self) -> QueueIter {
QueueIter { vq: self.clone() }
}
fn need_interrupt(&self, first_used: u16, used_count: usize) -> bool {
if used_count == 0 {
return false;
}
if self.use_event_idx() {
let event = self.vring.read_used_event();
// Minimum count needed to traverse event idx
let span = (event.wrapping_sub(first_used) as usize) + 1;
return used_count >= span;
}
!self.vring.read_avail_no_interrupt()
}
pub fn put_used(&self, idx: u16, len: u32) {
let used = self.vring.next_used();
self.vring.put_used(idx, len);
if self.need_interrupt(used, 1) {
self.interrupt.notify_queue();
}
}
fn pop_avail_entry(&self) -> Option<u16> {
if let Some(idx) = self.vring.pop_avail_entry() {
if self.use_event_idx() {
self.vring.write_avail_event(self.vring.next_avail());
}
return Some(idx)
}
None
}
pub fn load_descriptor(&self, idx: u16) -> Option<Descriptor> {
self.vring.load_descriptor(idx)
}
pub fn ioevent(&self) -> &EventFd {
&self.ioeventfd
}
}
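A minimal sketch of a device worker built on the API above. on_each_chain() parks on the queue's ioeventfd and hands every available chain to the closure; what happens inside the closure is device-specific.

fn run_device_worker(vq: VirtQueue) {
    vq.on_each_chain(|chain| {
        // parse the request from `chain` and post a response; completing
        // the chain returns its buffers to the guest via the used ring
        drop(chain);
    });
}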
pub struct QueueIter {
vq: VirtQueue,
}
impl Iterator for QueueIter {
type Item = Chain;
fn next(&mut self) -> Option<Self::Item> {
self.vq.pop_avail_entry().map(|idx| {
Chain::new(self.vq.memory.clone(), self.vq.clone(), idx, self.vq.vring.size())
})
}
}
pub struct InterruptLine {
irqfd: EventFd,
isr: AtomicUsize,
}
impl InterruptLine {
pub fn from_config(conf: &VirtioDeviceConfig) -> Result<Arc<InterruptLine>> {
InterruptLine::new(conf.kvm_vm(), conf.irq())
}
fn new(kvm_vm: &KvmVm, irq: u8) -> Result<Arc<InterruptLine>> {
let irqfd = EventFd::new(0)
.map_err(Error::CreateEventFd)?;
kvm_vm.vm_fd().register_irqfd(&irqfd, irq as u32)
.map_err(Error::IrqFd)?;
Ok(Arc::new(InterruptLine{
irqfd,
isr: AtomicUsize::new(0)
}))
}
pub fn isr_read(&self) -> u64 {
self.isr.swap(0, Ordering::SeqCst) as u64
}
pub fn notify_queue(&self) {
self.isr.fetch_or(0x1, Ordering::SeqCst);
self.irqfd.write(1).unwrap();
}
pub fn notify_config(&self) {
self.isr.fetch_or(0x2, Ordering::SeqCst);
self.irqfd.write(1).unwrap();
}
}


@ -1,384 +0,0 @@
use std::sync::atomic::{self,AtomicUsize,Ordering};
use std::sync::Arc;
use std::fmt;
use std::cmp;
use std::io::{self, Read};
use crate::memory::GuestRam;
use super::consts::*;
use crate::virtio::{Result,Error};
///
/// A convenience wrapper around `AtomicUsize`
///
#[derive(Clone)]
struct SharedIndex(Arc<AtomicUsize>);
impl SharedIndex {
fn new() -> SharedIndex {
SharedIndex(Arc::new(AtomicUsize::new(0)))
}
fn get(&self) -> u16 {
self.0.load(Ordering::SeqCst) as u16
}
fn inc(&self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
fn set(&self, v: u16) {
self.0.store(v as usize, Ordering::SeqCst);
}
}
///
/// Access to the low-level memory structure of a Virtqueue.
///
#[derive(Clone)]
pub struct Vring {
memory: GuestRam,
/// Default queue_size for this virtqueue
default_size: u16,
/// Number of elements in the virtqueue ring
queue_size: u16,
/// Guest address for base of descriptor table
pub descriptors: u64,
/// Guest address for base of available ring
pub avail_ring: u64,
/// Guest address for base of used ring
pub used_ring: u64,
/// Has this virtqueue been enabled?
enabled: bool,
/// The index in the used ring where the next used entry will be placed
next_used_idx: SharedIndex,
/// last seen avail_idx loaded from guest memory
cached_avail_idx: SharedIndex,
/// The index in the avail ring where the next available entry will be read
next_avail: SharedIndex,
}
impl Vring {
pub fn new(memory: GuestRam, default_size: u16) -> Vring {
Vring {
memory,
default_size,
queue_size: default_size,
descriptors:0,
avail_ring: 0,
used_ring: 0,
enabled: false,
next_used_idx: SharedIndex::new(),
cached_avail_idx: SharedIndex::new(),
next_avail: SharedIndex::new(),
}
}
///
/// Set `Vring` into the enabled state.
///
pub fn enable(&mut self) {
self.enabled = true;
}
///
/// Return `true` if this `Vring` has been enabled.
///
pub fn is_enabled(&self) -> bool {
self.enabled
}
///
/// Queue size of this `Vring`
///
pub fn size(&self) -> u16 {
self.queue_size
}
///
/// Set the queue size of this `Vring`. If `sz` is an invalid value
/// ignore the request. It is illegal to change the queue size after
/// a virtqueue has been enabled, so ignore requests if enabled.
///
/// Valid sizes are less than or equal to `MAX_QUEUE_SIZE` and must
/// be a power of 2.
///
pub fn set_size(&mut self, sz: u16) {
if self.enabled || sz == 0 || sz > MAX_QUEUE_SIZE || (sz & (sz - 1)) != 0 {
return;
}
self.queue_size = sz;
}
///
/// Reset `Vring` to the initial state. `queue_size` is set to the `default_size`
/// and all other fields are cleared. `enabled` is set to false.
///
pub fn reset(&mut self) {
self.queue_size = self.default_size;
self.descriptors = 0;
self.avail_ring = 0;
self.used_ring = 0;
self.enabled = false;
self.next_used_idx.set(0);
self.cached_avail_idx.set(0);
self.next_avail.set(0);
}
///
/// Does `Vring` currently have available entries?
///
/// Queue is empty if `next_avail` is same value as
/// `avail_ring.idx` value in guest memory If `cached_avail_idx`
/// currently matches `next_avail` it is reloaded from
/// memory in case guest has updated field since last
/// time it was loaded.
///
pub fn is_empty(&self) -> bool {
let next_avail = self.next_avail.get();
if self.cached_avail_idx.get() != next_avail {
return false;
}
next_avail == self.load_avail_idx()
}
///
/// Write an entry into the Used ring.
///
/// The entry is written into the ring structure at offset
/// `next_used_idx % queue_size`. The value of `next_used_idx`
/// is then incremented and the new value is written into
/// guest memory into the `used_ring.idx` field.
///
pub fn put_used(&self, idx: u16, len: u32) {
if idx >= self.queue_size {
return;
}
let used_idx = (self.next_used_idx.get() % self.queue_size) as u64;
let elem_addr = self.used_ring + (4 + used_idx * 8);
// write descriptor index to 'next used' slot in used ring
self.memory.write_int(elem_addr, idx as u32).unwrap();
// write length to 'next used' slot in ring
self.memory.write_int(elem_addr + 4, len as u32).unwrap();
self.next_used_idx.inc();
atomic::fence(Ordering::Release);
// write updated next_used
self.memory.write_int(self.used_ring + 2, self.next_used_idx.get()).unwrap();
}
///
/// Load `avail_ring.idx` from guest memory and store it in `cached_avail_idx`.
///
pub fn load_avail_idx(&self) -> u16 {
let avail_idx = self.memory.read_int::<u16>(self.avail_ring + 2).unwrap();
self.cached_avail_idx.set(avail_idx);
avail_idx
}
///
/// Read from guest memory and return the Avail ring entry at
/// index `ring_idx % queue_size`.
///
fn load_avail_entry(&self, ring_idx: u16) -> u16 {
let offset = (4 + (ring_idx % self.queue_size) * 2) as u64;
self.memory.read_int(self.avail_ring + offset).unwrap()
}
///
/// If queue is not empty, read and return the next Avail ring entry
/// and increment `next_avail`. If queue is empty return `None`
///
pub fn pop_avail_entry(&self) -> Option<u16> {
if self.is_empty() {
return None
}
let next_avail = self.next_avail.get();
let avail_entry = self.load_avail_entry(next_avail);
self.next_avail.inc();
Some(avail_entry)
}
pub fn next_avail(&self) -> u16 {
self.next_avail.get() % self.queue_size
}
///
/// Read and return the `used_event` field from the Avail ring.
///
pub fn read_used_event(&self) -> u16 {
let addr = self.avail_ring + 4 + (self.queue_size as u64 * 2);
self.memory.read_int::<u16>(addr).unwrap()
}
///
/// Read the `flags` field from the Avail ring and return `true` if
/// `NO_INTERRUPT` bit is set.
///
pub fn read_avail_no_interrupt(&self) -> bool {
let flags = self.memory.read_int::<u16>(self.avail_ring).unwrap();
flags & 0x01 != 0
}
///
/// Write `val` to the `avail_event` field of Used ring.
///
/// If `val` is not a valid index for this virtqueue this
/// function does nothing.
///
pub fn write_avail_event(&self, val: u16) {
if val >= self.queue_size {
return;
}
let addr = self.used_ring + 4 + (self.queue_size as u64 * 8);
self.memory.write_int::<u16>(addr, val).unwrap();
atomic::fence(Ordering::Release);
}
///
/// Set or clear the `NO_NOTIFY` bit in flags field of Used ring
///
#[allow(dead_code)]
pub fn write_used_no_notify(&self, val: bool) {
let flag = if val { 0x1 } else { 0x0 };
self.memory.write_int::<u16>(self.used_ring, flag).unwrap();
}
///
/// Load the descriptor table entry at `idx` from guest memory and return it.
///
pub fn load_descriptor(&self, idx: u16) -> Option<Descriptor> {
if idx >= self.queue_size {
panic!("load_descriptor called with index larger than queue size");
}
let head = self.descriptors + (idx as u64 * 16);
let addr = self.memory.read_int::<u64>(head).unwrap();
let len= self.memory.read_int::<u32>(head + 8).unwrap();
let flags = self.memory.read_int::<u16>(head + 12).unwrap();
let next = self.memory.read_int::<u16>(head + 14).unwrap();
if self.memory.is_valid_range(addr, len as usize) && next < self.queue_size {
return Some(Descriptor::new(idx, addr, len, flags, next));
}
None
}
pub fn next_used(&self) -> u16 {
self.next_used_idx.get()
}
pub fn validate(&self) -> Result<()> {
if !self.enabled {
return Err(Error::VringNotEnabled);
}
let qsz = self.queue_size as usize;
let desc_table_sz = 16 * qsz;
let avail_ring_sz = 6 + 2 * qsz;
let used_ring_sz = 6 + 8 * qsz;
if !self.memory.is_valid_range(self.descriptors, desc_table_sz) {
return Err(Error::VringRangeInvalid(self.descriptors));
}
if !self.memory.is_valid_range(self.avail_ring, avail_ring_sz) {
return Err(Error::VringAvailInvalid(self.avail_ring));
}
if !self.memory.is_valid_range(self.used_ring, used_ring_sz) {
return Err(Error::VringUsedInvalid(self.used_ring));
}
Ok(())
}
}
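The ring-layout arithmetic that validate() checks, worked for a 256-entry queue:

fn example_ring_sizes() {
    let qsz = 256usize;
    assert_eq!(16 * qsz, 4096);    // descriptor table: 16 bytes per descriptor
    assert_eq!(6 + 2 * qsz, 518);  // avail ring: flags + idx + one u16 per entry
    assert_eq!(6 + 8 * qsz, 2054); // used ring: flags + idx + one 8-byte elem per entry
}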
///
/// An entry read from the descriptor table
///
#[derive(Copy,Clone)]
pub struct Descriptor {
pub idx: u16,
pub addr: u64,
pub len: u32,
pub flags: u16,
pub next: u16,
}
impl Descriptor {
fn new(idx: u16, addr: u64, len: u32, flags: u16, next:u16) -> Descriptor {
Descriptor{ idx, addr, len, flags, next }
}
///
/// Test if `flag` is set in `self.flags`
///
fn has_flag(&self, flag: u16) -> bool {
self.flags & flag == flag
}
///
/// Is VRING_DESC_F_NEXT set in `self.flags`?
///
pub fn has_next(&self) -> bool {
self.has_flag(VRING_DESC_F_NEXT)
}
///
/// Is VRING_DESC_F_WRITE set in `self.flags`?
///
pub fn is_write(&self) -> bool {
self.has_flag(VRING_DESC_F_WRITE)
}
///
/// Is VRING_DESC_F_INDIRECT set in `self.flags`?
///
#[allow(dead_code)]
pub fn is_indirect(&self) -> bool {
self.has_flag(VRING_DESC_F_INDIRECT)
}
pub fn remaining(&self, offset: usize) -> usize {
if offset >= self.len as usize {
0
} else {
self.len as usize - offset
}
}
pub fn read_from(&self, memory: &GuestRam, offset: usize, buf: &mut[u8]) -> usize {
let sz = cmp::min(buf.len(), self.remaining(offset));
if sz > 0 {
memory.read_bytes(self.addr + offset as u64, &mut buf[..sz]).unwrap();
}
sz
}
pub fn write_to(&self, memory: &GuestRam, offset: usize, buf: &[u8]) -> usize {
let sz = cmp::min(buf.len(), self.remaining(offset));
if sz > 0 {
memory.write_bytes(self.addr + offset as u64, &buf[..sz]).unwrap();
}
sz
}
pub fn write_from_reader<R: Read+Sized>(&self, memory: &GuestRam, offset: usize, mut r: R, size: usize) -> io::Result<usize> {
let sz = cmp::min(size, self.remaining(offset));
if sz > 0 {
let slice = memory.mut_slice(self.addr + offset as u64, sz).unwrap();
return r.read(slice);
}
Ok(0)
}
}
impl fmt::Debug for Descriptor {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Descriptor{{ idx: {} addr: {:x} len: {} flags: {:x} next: {} }}", self.idx, self.addr, self.len, self.flags, self.next)
}
}
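A sketch of walking a descriptor chain with the accessors above; chain_lengths is a hypothetical helper, not part of this diff.

fn chain_lengths(vring: &Vring, head: u16) -> (usize, usize) {
    let (mut readable, mut writable) = (0, 0);
    let mut next = Some(head);
    while let Some(idx) = next {
        match vring.load_descriptor(idx) {
            Some(d) => {
                if d.is_write() {
                    writable += d.len as usize;
                } else {
                    readable += d.len as usize;
                }
                next = if d.has_next() { Some(d.next) } else { None };
            }
            None => break,
        }
    }
    (readable, writable)
}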


@ -6,12 +6,13 @@ use crate::memory::MemoryManager;
mod error;
mod x86;
-pub use x86::PCI_MMIO_RESERVED_BASE;
+pub use x86::{PCI_MMIO_RESERVED_BASE,PCI_MMIO_RESERVED_SIZE,IRQ_BASE,IRQ_MAX};
pub use error::{Error,Result};
+use crate::io::PciIrq;
use crate::vm::kernel_cmdline::KernelCmdLine;
use crate::vm::VmConfig;
-use crate::virtio::PciIrq;
use crate::vm::kvm_vm::KvmVm;
pub fn create_setup(config: &VmConfig) -> X86ArchSetup {


@ -1,16 +1,18 @@
use crate::memory::{MemoryManager, MemoryRegion, GuestRam};
use crate::vm::arch::{Error, Result};
use std::cmp;
+use crate::io::PciIrq;
use crate::vm::kernel_cmdline::KernelCmdLine;
use crate::vm::arch::x86::kernel::{load_pm_kernel, KERNEL_CMDLINE_ADDRESS};
use crate::system;
use crate::vm::arch::x86::mptable::setup_mptable;
-use crate::virtio::PciIrq;
use crate::vm::KvmVm;
pub const HIMEM_BASE: u64 = 1 << 32;
pub const PCI_MMIO_RESERVED_SIZE: usize = 512 << 20;
pub const PCI_MMIO_RESERVED_BASE: u64 = HIMEM_BASE - PCI_MMIO_RESERVED_SIZE as u64;
+pub const IRQ_BASE: u32 = 5;
+pub const IRQ_MAX: u32 = 23;
pub fn x86_setup_memory_regions(memory: &mut MemoryManager, ram_size: usize) -> Result<()> {


@ -8,4 +8,4 @@ mod kernel;
mod setup;
pub use setup::X86ArchSetup;
-pub use memory::PCI_MMIO_RESERVED_BASE;
+pub use memory::{PCI_MMIO_RESERVED_BASE,PCI_MMIO_RESERVED_SIZE,IRQ_BASE,IRQ_MAX};


@ -4,9 +4,9 @@ use std::sync::atomic::AtomicBool;
use kvm_bindings::{CpuId, KVM_MAX_CPUID_ENTRIES, kvm_pit_config, KVM_PIT_SPEAKER_DUMMY, kvm_userspace_memory_region};
use kvm_ioctls::{Cap, Kvm, VmFd};
use kvm_ioctls::Cap::*;
+use crate::io::manager::IoManager;
use crate::vm::vcpu::Vcpu;
use crate::vm::{Result, Error, ArchSetup};
-use crate::vm::io::IoDispatcher;
const KVM_API_VERSION: i32 = 12;
type KvmResult<T> = result::Result<T, kvm_ioctls::Error>;
@ -17,6 +17,7 @@ static REQUIRED_EXTENSIONS: &[Cap] = &[
ExtCpuid,
Hlt,
Ioeventfd,
+IoeventfdNoLength,
Irqchip,
MpState,
Pit2,
@ -115,10 +116,10 @@ impl KvmVm {
.map_err(Error::VmSetup)
}
-pub fn create_vcpu<A: ArchSetup>(&self, id: u64, io: Arc<IoDispatcher>, shutdown: Arc<AtomicBool>, arch: &mut A) -> Result<Vcpu> {
+pub fn create_vcpu<A: ArchSetup>(&self, id: u64, io_manager: IoManager, shutdown: Arc<AtomicBool>, arch: &mut A) -> Result<Vcpu> {
let vcpu_fd = self.vm_fd.create_vcpu(id)
.map_err(Error::CreateVcpu)?;
-let vcpu = Vcpu::new(vcpu_fd, io, shutdown);
+let vcpu = Vcpu::new(vcpu_fd, io_manager, shutdown);
arch.setup_vcpu(vcpu.vcpu_fd(), self.supported_cpuid().clone()).map_err(Error::ArchError)?;
Ok(vcpu)
}


@ -3,7 +3,6 @@ static PHINIT: &[u8] = include_bytes!("../../ph-init/target/release/ph-init");
static SOMMELIER: &[u8] = include_bytes!("../../sommelier/build/sommelier");
pub mod arch;
-pub mod io;
mod setup;
mod error;
mod kernel_cmdline;


@ -1,13 +1,9 @@
use crate::vm::{VmConfig, Result, Error, PHINIT, SOMMELIER};
use crate::vm::arch::ArchSetup;
use crate::vm::kernel_cmdline::KernelCmdLine;
-use crate::vm::io::IoDispatcher;
-use crate::devices;
use termios::Termios;
-use crate::virtio::VirtioBus;
-use crate::virtio;
-use crate::devices::SyntheticFS;
-use std::{fs, thread};
+use crate::devices::{SyntheticFS, VirtioBlock, VirtioNet, VirtioP9, VirtioRandom, VirtioSerial, VirtioWayland};
+use std::{env, fs, thread};
use crate::system::{Tap, NetlinkSocket};
use crate::disk::DiskImage;
use std::sync::Arc;
@ -15,6 +11,10 @@ use crate::memory::MemoryManager;
use std::sync::atomic::AtomicBool;
use kvm_ioctls::VmFd;
use vmm_sys_util::eventfd::EventFd;
+use crate::devices::ac97::{Ac97Dev, Ac97Parameters};
+use crate::devices::serial::SerialPort;
+use crate::io::manager::IoManager;
+use crate::{Logger, LogLevel};
use crate::vm::kvm_vm::KvmVm;
use crate::vm::vcpu::Vcpu;
@ -22,12 +22,12 @@ pub struct Vm {
kvm_vm: KvmVm,
vcpus: Vec<Vcpu>,
memory: MemoryManager,
-io_dispatch: Arc<IoDispatcher>,
+io_manager: IoManager,
termios: Option<Termios>,
}
impl Vm {
-fn create<A: ArchSetup>(arch: &mut A, reset_evt: EventFd) -> Result<Self> {
+fn create<A: ArchSetup>(arch: &mut A) -> Result<Self> {
let kvm_vm = KvmVm::open()?;
kvm_vm.create_irqchip()?;
kvm_vm.vm_fd().set_tss_address(0xfffbd000)
@ -36,11 +36,13 @@ impl Vm {
let memory = arch.create_memory(kvm_vm.clone())
.map_err(Error::ArchError)?;
+let io_manager = IoManager::new(memory.clone());
Ok(Vm {
kvm_vm,
memory,
+io_manager,
vcpus: Vec::new(),
-io_dispatch: IoDispatcher::new(reset_evt),
termios: None,
})
}
@ -89,14 +91,16 @@ impl <T: ArchSetup> VmSetup <T> {
pub fn create_vm(&mut self) -> Result<Vm> {
let exit_evt = EventFd::new(libc::EFD_NONBLOCK)?;
-let reset_evt = exit_evt.try_clone()?;
-let mut vm = Vm::create(&mut self.arch, reset_evt)?;
-devices::rtc::Rtc::register(vm.io_dispatch.clone());
+let mut vm = Vm::create(&mut self.arch)?;
+let reset_evt = exit_evt.try_clone()?;
+vm.io_manager.register_legacy_devices(reset_evt);
if self.config.verbose() {
+Logger::set_log_level(LogLevel::Info);
self.cmdline.push("earlyprintk=serial");
-devices::serial::SerialDevice::register(vm.kvm_vm.clone(),vm.io_dispatch.clone(), 0);
+vm.io_manager.register_serial_port(SerialPort::COM1);
} else {
self.cmdline.push("quiet");
}
@ -115,36 +119,50 @@ impl <T: ArchSetup> VmSetup <T> {
.map_err(Error::TerminalTermios)?;
vm.termios = Some(saved);
-let mut virtio = VirtioBus::new(vm.memory.clone(), vm.io_dispatch.clone(), vm.kvm_vm.clone());
-self.setup_synthetic_bootfs(&mut virtio)?;
-self.setup_virtio(&mut virtio)
-.map_err(Error::SetupVirtio)?;
+self.setup_synthetic_bootfs(&mut vm.io_manager)?;
+self.setup_virtio(&mut vm.io_manager)?;
+if self.config.is_audio_enable() {
+if unsafe { libc::geteuid() } == 0 {
+self.drop_privs();
+}
+env::set_var("HOME", "/home/citadel");
+env::set_var("XDG_RUNTIME_DIR", "/run/user/1000");
+let irq = vm.io_manager.allocator().allocate_irq();
+let mem = vm.memory.guest_ram().clone();
+// XXX expect()
+let ac97 = Ac97Dev::try_new(&vm.kvm_vm, irq, mem, Ac97Parameters::new_pulseaudio()).expect("audio initialize error");
+vm.io_manager.add_pci_device(Arc::new(Mutex::new(ac97)));
+}
if let Some(init_cmd) = self.config.get_init_cmdline() {
self.cmdline.push_set_val("init", init_cmd);
}
-self.arch.setup_memory(&self.cmdline, &virtio.pci_irqs())
+let pci_irqs = vm.io_manager.pci_irqs();
+self.arch.setup_memory(&self.cmdline, &pci_irqs)
.map_err(Error::ArchError)?;
let shutdown = Arc::new(AtomicBool::new(false));
for id in 0..self.config.ncpus() {
-let vcpu = vm.kvm_vm.create_vcpu(id as u64, vm.io_dispatch.clone(), shutdown.clone(), &mut self.arch)?;
+let vcpu = vm.kvm_vm.create_vcpu(id as u64, vm.io_manager.clone(), shutdown.clone(), &mut self.arch)?;
vm.vcpus.push(vcpu);
}
Ok(vm)
}
-fn setup_virtio(&mut self, virtio: &mut VirtioBus) -> virtio::Result<()> {
-devices::VirtioSerial::create(virtio)?;
-devices::VirtioRandom::create(virtio)?;
+fn setup_virtio(&mut self, io_manager: &mut IoManager) -> Result<()> {
+io_manager.add_virtio_device(VirtioSerial::new())?;
+io_manager.add_virtio_device(VirtioRandom::new())?;
if self.config.is_wayland_enabled() {
-devices::VirtioWayland::create(virtio, self.config.is_dmabuf_enabled())?;
+io_manager.add_virtio_device(VirtioWayland::new(self.config.is_dmabuf_enabled()))?;
}
let homedir = self.config.homedir();
-devices::VirtioP9::create(virtio, "home", homedir, false, false)?;
+io_manager.add_virtio_device(VirtioP9::new_filesystem("home", homedir, false, false))?;
if homedir != "/home/user" && !self.config.is_realm() {
self.cmdline.push_set_val("phinit.home", homedir);
}
@ -155,14 +173,14 @@ impl <T: ArchSetup> VmSetup <T> {
if block_root == None {
block_root = Some(disk.read_only());
}
-devices::VirtioBlock::create(virtio, disk)?;
+io_manager.add_virtio_device(VirtioBlock::new(disk))?;
}
for disk in self.config.get_raw_disk_images() {
if block_root == None {
block_root = Some(disk.read_only());
}
-devices::VirtioBlock::create(virtio, disk)?;
+io_manager.add_virtio_device(VirtioBlock::new(disk))?;
}
if let Some(read_only) = block_root {
@ -172,14 +190,14 @@ impl <T: ArchSetup> VmSetup <T> {
self.cmdline.push("phinit.root=/dev/vda");
self.cmdline.push("phinit.rootfstype=ext4");
} else {
-devices::VirtioP9::create(virtio, "9proot", "/", true, false)?;
+io_manager.add_virtio_device(VirtioP9::new_filesystem("9proot", "/", true, false))?;
self.cmdline.push_set_val("phinit.root", "9proot");
self.cmdline.push_set_val("phinit.rootfstype", "9p");
self.cmdline.push_set_val("phinit.rootflags", "trans=virtio");
}
if self.config.network() {
-self.setup_network(virtio)?;
+self.setup_network(io_manager)?;
self.drop_privs();
}
@ -196,12 +214,11 @@ impl <T: ArchSetup> VmSetup <T> {
}
-fn setup_synthetic_bootfs(&mut self, virtio: &mut VirtioBus) -> Result<()> {
+fn setup_synthetic_bootfs(&mut self, io_manager: &mut IoManager) -> Result<()> {
let bootfs = self.create_bootfs()
.map_err(Error::SetupBootFs)?;
-devices::VirtioP9::create_with_filesystem(bootfs, virtio, "/dev/root", "/", false)
-.map_err(Error::SetupVirtio)?;
+io_manager.add_virtio_device(VirtioP9::new(bootfs, "/dev/root", "/", false))?;
self.cmdline.push_set_val("init", "/usr/bin/ph-init");
self.cmdline.push_set_val("root", "/dev/root");
@ -211,7 +228,7 @@ impl <T: ArchSetup> VmSetup <T> {
Ok(())
}
-fn create_bootfs(&self) -> ::std::io::Result<SyntheticFS> {
+fn create_bootfs(&self) -> std::io::Result<SyntheticFS> {
let mut s = SyntheticFS::new();
s.mkdirs(&["/tmp", "/proc", "/sys", "/dev", "/home/user", "/bin", "/etc"]);
@ -226,7 +243,7 @@ impl <T: ArchSetup> VmSetup <T> {
Ok(s)
}
-fn setup_network(&mut self, virtio: &mut VirtioBus) -> virtio::Result<()> {
+fn setup_network(&mut self, io_manager: &mut IoManager) -> Result<()> {
let tap = match self.setup_tap() {
Ok(tap) => tap,
Err(e) => {
@ -234,7 +251,7 @@ impl <T: ArchSetup> VmSetup <T> {
return Ok(());
}
};
-devices::VirtioNet::create(virtio, tap)?;
+io_manager.add_virtio_device(VirtioNet::new(tap))?;
self.cmdline.push("phinit.ip=172.17.0.22");
Ok(())
}


@ -1,34 +1,22 @@
-use std::convert::TryInto;
-use std::sync::Arc;
+use std::sync::{Arc, Barrier};
use std::sync::atomic::{AtomicBool,Ordering};
use kvm_ioctls::{VcpuExit, VcpuFd};
-use vmm_sys_util::sock_ctrl_msg::IntoIovec;
-use crate::vm::io::IoDispatcher;
+use crate::io::manager::IoManager;
-/*
-pub enum VcpuEvent {
-    Exit,
-}
-pub struct VcpuHandle {
-    sender: Sender<VcpuEvent>,
-    thread: thread::JoinHandle<()>,
-}
-*/
pub struct Vcpu {
vcpu_fd: VcpuFd,
-io: Arc<IoDispatcher>,
+io_manager: IoManager,
shutdown: Arc<AtomicBool>,
}
impl Vcpu {
-pub fn new(vcpu_fd: VcpuFd, io: Arc<IoDispatcher>, shutdown: Arc<AtomicBool>) -> Self {
+pub fn new(vcpu_fd: VcpuFd, io_manager: IoManager, shutdown: Arc<AtomicBool>) -> Self {
Vcpu {
-vcpu_fd, io, shutdown,
+vcpu_fd,
+io_manager,
+shutdown,
}
}
@ -36,60 +24,45 @@ impl Vcpu {
&self.vcpu_fd
}
-fn data_to_int(data: &[u8]) -> u64 {
-match data.len() {
-1 => data[0] as u64,
-2 => u16::from_le_bytes(data.try_into().unwrap()) as u64,
-4 => u32::from_le_bytes(data.try_into().unwrap()) as u64,
-8 => u64::from_le_bytes(data.try_into().unwrap()),
-_ => 0,
-}
-}
-fn int_to_data(n: u64, data: &mut[u8]) {
-match data.len() {
-1 => data[0] = n as u8,
-2 => data.copy_from_slice((n as u16).to_le_bytes().as_slice()),
-4 => data.copy_from_slice((n as u32).to_le_bytes().as_slice()),
-8 => data.copy_from_slice((n as u64).to_le_bytes().as_slice()),
-_ => {},
-}
-}
fn handle_io_out(&self, port: u16, data: &[u8]) {
-let val = Self::data_to_int(data) as u32;
-self.io.emulate_io_out(port, data.size(), val);
+let _ok = self.io_manager.pio_write(port, data);
}
fn handle_io_in(&self, port: u16, data: &mut [u8]) {
-let val = self.io.emulate_io_in(port, data.len());
-Self::int_to_data(val as u64, data);
+let _ok = self.io_manager.pio_read(port, data);
}
fn handle_mmio_read(&self, addr: u64, data: &mut [u8]) {
-let val = self.io.emulate_mmio_read(addr, data.len());
-Self::int_to_data(val, data);
+let _ok = self.io_manager.mmio_read(addr, data);
}
fn handle_mmio_write(&self, addr: u64, data: &[u8]) {
-let val = Self::data_to_int(data);
-self.io.emulate_mmio_write(addr, data.size(), val);
+let _ok = self.io_manager.mmio_write(addr, data);
}
fn handle_shutdown(&self) {
self.shutdown.store(true, Ordering::Relaxed);
}
-pub fn run(&self) {
+pub fn run(&self, barrier: &Arc<Barrier>) {
+barrier.wait();
loop {
-match self.vcpu_fd.run().expect("fail") {
-VcpuExit::IoOut(port, data) => self.handle_io_out(port, data),
-VcpuExit::IoIn(port, data) => self.handle_io_in(port, data),
-VcpuExit::MmioRead(addr, data) => self.handle_mmio_read(addr, data),
-VcpuExit::MmioWrite(addr, data) => self.handle_mmio_write(addr, data),
-VcpuExit::Shutdown => self.handle_shutdown(),
-exit => {
+match self.vcpu_fd.run() {
+Ok(VcpuExit::IoOut(port, data)) => self.handle_io_out(port, data),
+Ok(VcpuExit::IoIn(port, data)) => self.handle_io_in(port, data),
+Ok(VcpuExit::MmioRead(addr, data)) => self.handle_mmio_read(addr, data),
+Ok(VcpuExit::MmioWrite(addr, data)) => self.handle_mmio_write(addr, data),
+Ok(VcpuExit::Shutdown) => self.handle_shutdown(),
+Ok(exit) => {
println!("unhandled exit: {:?}", exit);
-}
+},
+Err(err) => {
+if err.errno() == libc::EAGAIN {}
+else {
+warn!("VCPU run() returned error: {}", err);
+return;
+}
+}
}
}
if self.shutdown.load(Ordering::Relaxed) {