diff --git a/.buildkite/custom-tests.json b/.buildkite/custom-tests.json index 6f581a4c..6f5415eb 100644 --- a/.buildkite/custom-tests.json +++ b/.buildkite/custom-tests.json @@ -1,61 +1,9 @@ { "tests": [ - { - "test_name": "build-gnu-mmap", - "command": "cargo build --release --features=xen", - "platform": ["x86_64", "aarch64"] - }, - { - "test_name": "build-gnu-mmap-no-xen", - "command": "cargo build --release --features=backend-mmap", - "platform": ["x86_64", "aarch64"] - }, - { - "test_name": "build-musl-mmap", - "command": "cargo build --release --features=xen --target {target_platform}-unknown-linux-musl", - "platform": ["x86_64", "aarch64"] - }, - { - "test_name": "build-musl-mmap-no-xen", - "command": "cargo build --release --features=backend-mmap --target {target_platform}-unknown-linux-musl", - "platform": ["x86_64", "aarch64"] - }, { "test_name": "miri", "command": "RUST_BACKTRACE=1 MIRIFLAGS='-Zmiri-disable-isolation -Zmiri-backtrace=full' cargo +nightly miri test --features backend-mmap,backend-bitmap", "platform": ["x86_64", "aarch64"] - }, - { - "test_name": "unittests-gnu-no-xen", - "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace", - "platform": [ - "x86_64", - "aarch64" - ] - }, - { - "test_name": "unittests-musl-no-xen", - "command": "cargo test --features 'backend-bitmap backend-mmap backend-atomic' --workspace --target {target_platform}-unknown-linux-musl", - "platform": [ - "x86_64", - "aarch64" - ] - }, - { - "test_name": "clippy-no-xen", - "command": "cargo clippy --workspace --bins --examples --benches --features 'backend-bitmap backend-mmap backend-atomic' --all-targets -- -D warnings -D clippy::undocumented_unsafe_blocks", - "platform": [ - "x86_64", - "aarch64" - ] - }, - { - "test_name": "check-warnings-no-xen", - "command": "RUSTFLAGS=\"-D warnings\" cargo check --all-targets --features 'backend-bitmap backend-mmap backend-atomic' --workspace", - "platform": [ - "x86_64", - "aarch64" - ] } ] } 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 53b14972..ddbbf6fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,9 @@ and `GuestRegionMmap::from_range` to be separate from the error type returned by `GuestRegionCollection` functions. Change return type of `GuestRegionMmap::new` from `Result` to `Option`. - \[[#324](https://github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is known, for example with `MmapRegion::bitmap()`. +- \[[#317](https://github.com/rust-vmm/vm-memory/pull/317)\]: Make `xen` feature additive, meaning enabling it + no longer disables the `mmap_unix` module at compile time. For this, various Xen-related structs had to be + renamed due to naming conflicts. ### Removed diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index 13f2dfd7..9c2ca7c0 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { "coverage_score": 91.78, "exclude_path": "mmap_windows.rs", - "crate_features": "backend-mmap,backend-atomic,backend-bitmap" + "crate_features": "backend-mmap,backend-atomic,backend-bitmap,xen" } diff --git a/src/bitmap/mod.rs b/src/bitmap/mod.rs index cf1555b2..b6be2c56 100644 --- a/src/bitmap/mod.rs +++ b/src/bitmap/mod.rs @@ -219,7 +219,7 @@ pub(crate) mod tests { // them to test the mmap-based backend implementations for now. Going forward, the generic // test functions defined here can be placed in a separate module (i.e. `test_utilities`) // which is gated by a feature and can be used for testing purposes by other crates as well.
- #[cfg(feature = "backend-mmap")] + #[cfg(all(feature = "backend-mmap", target_family = "unix"))] fn test_guest_memory_region(region: &R) { let dirty_addr = MemoryRegionAddress(0x0); let val = 123u64; @@ -253,7 +253,7 @@ pub(crate) mod tests { ); } - #[cfg(feature = "backend-mmap")] + #[cfg(all(feature = "backend-mmap", target_family = "unix"))] // Assumptions about M generated by f ... pub fn test_guest_memory_and_region(f: F) where diff --git a/src/bytes.rs b/src/bytes.rs index c755cfeb..6dfb9251 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -330,7 +330,7 @@ pub trait Bytes { /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(all(feature = "backend-mmap", feature = "rawfd"))] + /// # #[cfg(all(feature = "backend-mmap", feature = "rawfd", target_family = "unix"))] /// # { /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap}; /// # use std::fs::File; diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 39e4f10a..4b2b035d 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -174,7 +174,7 @@ impl FileOffset { /// # Examples (uses the `backend-mmap` and `backend-atomic` features) /// /// ``` -/// # #[cfg(feature = "backend-mmap")] +/// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use std::sync::Arc; /// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap}; @@ -292,7 +292,7 @@ pub trait GuestMemory { /// `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap}; /// # @@ -316,7 +316,7 @@ pub trait GuestMemory { /// # Examples (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use vm_memory::{Address, GuestAddress, 
GuestMemory, GuestMemoryMmap}; /// # @@ -428,7 +428,7 @@ pub trait GuestMemory { /// # Examples (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; /// # @@ -485,9 +485,9 @@ impl Bytes for T { /// * Write a slice at guestaddress 0x1000. (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { - /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) @@ -513,9 +513,9 @@ impl Bytes for T { /// * Read a slice of length 16 at guestaddress 0x1000. (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { - /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # /// let start_addr = GuestAddress(0x1000); /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) @@ -608,27 +608,16 @@ impl Bytes for T { #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - #[cfg(feature = "backend-mmap")] - use crate::bytes::ByteValued; - #[cfg(feature = "backend-mmap")] - use crate::GuestAddress; + #[cfg(feature = "backend-mmap")] use std::time::{Duration, Instant}; + use super::*; + #[cfg(feature = "backend-mmap")] + use crate::mmap::tests::AnyBackend; use vmm_sys_util::tempfile::TempFile; - #[cfg(feature = "backend-mmap")] - type GuestMemoryMmap = crate::GuestMemoryMmap<()>; - - #[cfg(feature = "backend-mmap")] - fn make_image(size: u8) -> Vec { - let mut image: 
Vec = Vec::with_capacity(size as usize); - for i in 0..size { - image.push(i); - } - image - } + use crate::ByteValued; #[test] fn test_file_offset() { @@ -643,19 +632,29 @@ mod tests { } #[cfg(feature = "backend-mmap")] + fn make_image(size: u8) -> Vec { + let mut image: Vec = Vec::with_capacity(size as usize); + for i in 0..size { + image.push(i); + } + image + } + #[test] + #[cfg(feature = "backend-mmap")] fn checked_read_from() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x40); - let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap(); - let image = make_image(0x80); - let offset = GuestAddress(0x30); - let count: usize = 0x20; - assert_eq!( - 0x20_usize, - mem.read_volatile_from(offset, &mut image.as_slice(), count) - .unwrap() - ); + for mem in AnyBackend::all(&[(start_addr1, 64, None), (start_addr2, 64, None)]) { + let image = make_image(0x80); + let offset = GuestAddress(0x30); + let count: usize = 0x20; + assert_eq!( + 0x20_usize, + mem.read_volatile_from(offset, &mut image.as_slice(), count) + .unwrap() + ); + } } // Runs the provided closure in a loop, until at least `duration` time units have elapsed. @@ -684,8 +683,8 @@ mod tests { // flips all the bits of the member with every write, while the reader checks that every byte // has the same value (and thus it did not do a non-atomic access). The test succeeds if // no mismatch is detected after performing accesses for a pre-determined amount of time. - #[cfg(feature = "backend-mmap")] #[cfg(not(miri))] // This test simulates a race condition between guest and vmm + #[cfg(feature = "backend-mmap")] fn non_atomic_access_helper() where T: ByteValued @@ -722,69 +721,68 @@ mod tests { // The address where we start writing/reading a Data value. 
let data_start = GuestAddress((region_len - mem::size_of::()) as u64); - let mem = GuestMemoryMmap::from_ranges(&[ - (start, region_len), - (start.unchecked_add(region_len as u64), region_len), - ]) - .unwrap(); - - // Need to clone this and move it into the new thread we create. - let mem2 = mem.clone(); - // Just some bytes. - let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255]; - - let mut data = Data { - val: T::from(0u8), - some_bytes, - }; - - // Simple check that cross-region write/read is ok. - mem.write_obj(data, data_start).unwrap(); - let read_data = mem.read_obj::>(data_start).unwrap(); - assert_eq!(read_data, data); - - let t = thread::spawn(move || { - let mut count: u64 = 0; - - loop_timed(Duration::from_secs(3), || { - let data = mem2.read_obj::>(data_start).unwrap(); - - // Every time data is written to memory by the other thread, the value of - // data.val alternates between 0 and T::MAX, so the inner bytes should always - // have the same value. If they don't match, it means we read a partial value, - // so the access was not atomic. - let bytes = data.val.into().to_le_bytes(); - for i in 1..mem::size_of::() { - if bytes[0] != bytes[i] { - panic!( - "val bytes don't match {:?} after {} iterations", - &bytes[..mem::size_of::()], - count - ); + for mem in AnyBackend::all(&[ + (start, region_len, None), + (start.unchecked_add(region_len as u64), region_len, None), + ]) { + // Need to clone this and move it into the new thread we create. + let mem2 = mem.clone(); + // Just some bytes. + let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255]; + + let mut data = Data { + val: T::from(0u8), + some_bytes, + }; + + // Simple check that cross-region write/read is ok. 
+ mem.write_obj(data, data_start).unwrap(); + let read_data = mem.read_obj::>(data_start).unwrap(); + assert_eq!(read_data, data); + + let t = thread::spawn(move || { + let mut count: u64 = 0; + + loop_timed(Duration::from_secs(3), || { + let data = mem2.read_obj::>(data_start).unwrap(); + + // Every time data is written to memory by the other thread, the value of + // data.val alternates between 0 and T::MAX, so the inner bytes should always + // have the same value. If they don't match, it means we read a partial value, + // so the access was not atomic. + let bytes = data.val.into().to_le_bytes(); + for i in 1..mem::size_of::() { + if bytes[0] != bytes[i] { + panic!( + "val bytes don't match {:?} after {} iterations", + &bytes[..mem::size_of::()], + count + ); + } } - } - count += 1; + count += 1; + }); }); - }); - // Write the object while flipping the bits of data.val over and over again. - loop_timed(Duration::from_secs(3), || { - mem.write_obj(data, data_start).unwrap(); - data.val = !data.val; - }); + // Write the object while flipping the bits of data.val over and over again. 
+ loop_timed(Duration::from_secs(3), || { + mem.write_obj(data, data_start).unwrap(); + data.val = !data.val; + }); - t.join().unwrap() + t.join().unwrap() + } } - #[cfg(feature = "backend-mmap")] #[test] #[cfg(not(miri))] + #[cfg(feature = "backend-mmap")] fn test_non_atomic_access() { non_atomic_access_helper::() } - #[cfg(feature = "backend-mmap")] #[test] + #[cfg(feature = "backend-mmap")] fn test_zero_length_accesses() { #[derive(Default, Clone, Copy)] #[repr(C)] @@ -795,57 +793,49 @@ mod tests { unsafe impl ByteValued for ZeroSizedStruct {} let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let obj = ZeroSizedStruct::default(); - let mut image = make_image(0x80); - - assert_eq!(mem.write(&[], addr).unwrap(), 0); - assert_eq!(mem.read(&mut [], addr).unwrap(), 0); - - assert!(mem.write_slice(&[], addr).is_ok()); - assert!(mem.read_slice(&mut [], addr).is_ok()); - - assert!(mem.write_obj(obj, addr).is_ok()); - assert!(mem.read_obj::(addr).is_ok()); - - assert_eq!( - mem.read_volatile_from(addr, &mut image.as_slice(), 0) - .unwrap(), - 0 - ); - - assert!(mem - .read_exact_volatile_from(addr, &mut image.as_slice(), 0) - .is_ok()); - - assert_eq!( - mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0) - .unwrap(), - 0 - ); - - assert!(mem - .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0) - .is_ok()); - } - - #[cfg(feature = "backend-mmap")] - #[test] - fn test_atomic_accesses() { - let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let bad_addr = addr.unchecked_add(0x1000); - - crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr); + for mem in AnyBackend::all(&[(addr, 0x1000, None)]) { + let obj = ZeroSizedStruct::default(); + let mut image = make_image(0x80); + + assert_eq!(mem.write(&[], addr).unwrap(), 0); + assert_eq!(mem.read(&mut [], addr).unwrap(), 0); + + assert!(mem.write_slice(&[], addr).is_ok()); + 
assert!(mem.read_slice(&mut [], addr).is_ok()); + + assert!(mem.write_obj(obj, addr).is_ok()); + assert!(mem.read_obj::(addr).is_ok()); + + assert_eq!( + mem.read_volatile_from(addr, &mut image.as_slice(), 0) + .unwrap(), + 0 + ); + + assert!(mem + .read_exact_volatile_from(addr, &mut image.as_slice(), 0) + .is_ok()); + + assert_eq!( + mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0) + .unwrap(), + 0 + ); + + assert!(mem + .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0) + .is_ok()); + } } - #[cfg(feature = "backend-mmap")] #[cfg(target_os = "linux")] #[test] + #[cfg(feature = "backend-mmap")] fn test_guest_memory_mmap_is_hugetlbfs() { let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let r = mem.find_region(addr).unwrap(); - assert_eq!(r.is_hugetlbfs(), None); + for mem in AnyBackend::all(&[(addr, 0x1000, None)]) { + let r = mem.find_region(addr).unwrap(); + assert_eq!(r.is_hugetlbfs(), None); + } } } diff --git a/src/lib.rs b/src/lib.rs index 2f87f4c8..8ab116ec 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -61,10 +61,20 @@ pub use io::{ReadVolatile, WriteVolatile}; #[cfg(feature = "backend-mmap")] pub mod mmap; -#[cfg(feature = "backend-mmap")] -pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapRegion}; -#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))] -pub use mmap::{MmapRange, MmapXenFlags}; +#[cfg(all(feature = "xen", target_family = "unix"))] +pub use mmap::xen::{GuestMemoryXen, MmapRange, MmapRegion as MmapRegionXen, MmapXenFlags}; + +#[cfg(all(feature = "backend-mmap", target_family = "unix"))] +pub use mmap::unix::{ + Error as MmapRegionError, GuestMemoryMmap, GuestRegionMmap, MmapRegion, MmapRegionBuilder, +}; + +#[cfg(windows)] +pub use crate::mmap::windows::{ + GuestMemoryWindows as GuestMemoryMmap, GuestRegionWindows as GuestRegionMmap, MmapRegion, +}; // rename for backwards compat +#[cfg(windows)] +pub use std::io::Error as MmapRegionError; pub 
mod volatile_memory; pub use volatile_memory::{ diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index b5baaf8a..9a922e46 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -12,29 +12,17 @@ //! //! This implementation is mmap-ing the memory of the guest into the current process. -use std::borrow::Borrow; -use std::ops::Deref; -use std::result; - -use crate::address::Address; -use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; -use crate::region::{ - GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionCollectionError, -}; -use crate::volatile_memory::{VolatileMemory, VolatileSlice}; - // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; -#[cfg(all(not(feature = "xen"), target_family = "unix"))] -mod unix; +#[cfg(target_family = "unix")] +pub(super) mod unix; #[cfg(all(feature = "xen", target_family = "unix"))] -pub(crate) mod xen; +pub(super) mod xen; #[cfg(target_family = "windows")] -mod windows; +pub(super) mod windows; #[cfg(all(not(feature = "xen"), target_family = "unix"))] pub use unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder}; @@ -47,205 +35,202 @@ pub use std::io::Error as MmapRegionError; #[cfg(target_family = "windows")] pub use windows::MmapRegion; -/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's -/// memory region in the current process. -/// -/// Represents a continuous region of the guest's physical memory that is backed by a mapping -/// in the virtual address space of the calling process. -#[derive(Debug)] -pub struct GuestRegionMmap { - mapping: MmapRegion, - guest_base: GuestAddress, -} - -impl Deref for GuestRegionMmap { - type Target = MmapRegion; - - fn deref(&self) -> &MmapRegion { - &self.mapping - } -} - -impl GuestRegionMmap { - /// Create a new memory-mapped memory region for the guest's physical memory. 
- /// - /// Returns `None` if `guest_base` + `mapping.len()` would overflow. - pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Option { - guest_base - .0 - .checked_add(mapping.size() as u64) - .map(|_| Self { - mapping, - guest_base, - }) - } -} +#[cfg(test)] +pub(crate) mod tests { + #![allow(clippy::undocumented_unsafe_blocks)] + extern crate vmm_sys_util; -#[cfg(not(feature = "xen"))] -impl GuestRegionMmap { - /// Create a new memory-mapped memory region from guest's physical memory, size and file. - pub fn from_range( - addr: GuestAddress, - size: usize, - file: Option, - ) -> result::Result { - let region = if let Some(ref f_off) = file { - MmapRegion::from_file(f_off.clone(), size)? - } else { - MmapRegion::new(size)? - }; + use crate::bitmap::BS; + use crate::{ + guest_memory, Address, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, + GuestMemoryRegion, GuestMemoryRegionBytes, GuestUsize, MemoryRegionAddress, MmapRegion, + VolatileMemory, VolatileSlice, + }; - Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) - } -} + use std::io::Write; + #[cfg(feature = "rawfd")] + use std::{fs::File, path::Path}; + use std::sync::Arc; + use vmm_sys_util::tempfile::TempFile; -#[cfg(feature = "xen")] -impl GuestRegionMmap { - /// Create a new Unix memory-mapped memory region from guest's physical memory, size and file. - /// This must only be used for tests, doctests, benches and is not designed for end consumers. - pub fn from_range( - addr: GuestAddress, - size: usize, - file: Option, - ) -> result::Result { - let range = MmapRange::new_unix(size, file, addr); - - let region = MmapRegion::from_range(range)?; - Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) - } -} + macro_rules! 
any_backend { + ($($(#[$attr:meta])* $backend: ident[$region: path]), *) => { + #[derive(Debug)] + pub enum AnyRegion { + $( + $(#[$attr])* $backend($region) + ),* + } -impl GuestMemoryRegion for GuestRegionMmap { - type B = B; + pub type AnyBackend = $crate::GuestRegionCollection; + + impl $crate::GuestMemoryRegion for AnyRegion { + type B = (); + + fn len(&self) -> GuestUsize { + match self { + $($(#[$attr])* AnyRegion::$backend(inner) => $crate::GuestMemoryRegion::len(inner)),* + } + } + + fn start_addr(&self) -> GuestAddress { + match self { + $($(#[$attr])* AnyRegion::$backend(inner) => inner.start_addr()),* + } + } + + fn bitmap(&self) {} + + fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { + match self { + $($(#[$attr])* AnyRegion::$backend(inner) => inner.get_host_address(addr)),* + } + } + + fn file_offset(&self) -> Option<&FileOffset> { + match self { + $($(#[$attr])* AnyRegion::$backend(inner) => inner.file_offset()),* + } + } + + fn get_slice(&self, offset: MemoryRegionAddress, count: usize) -> guest_memory::Result>> { + match self { + $($(#[$attr])* AnyRegion::$backend(inner) => $crate::GuestMemoryRegion::get_slice(inner, offset, count)),* + } + } + } - fn len(&self) -> GuestUsize { - self.mapping.size() as GuestUsize + impl GuestMemoryRegionBytes for AnyRegion {} + }; } - fn start_addr(&self) -> GuestAddress { - self.guest_base + any_backend! { + #[cfg(all(windows, feature = "backend-mmap"))] + Windows[crate::mmap::windows::GuestRegionWindows<()>], + #[cfg(all(unix, feature = "backend-mmap"))] + Mmap[crate::mmap::unix::GuestRegionMmap<()>], + #[cfg(all(unix, feature = "backend-mmap", feature = "xen"))] + Xen[crate::mmap::xen::MmapRegion] } - fn bitmap(&self) -> BS<'_, Self::B> { - self.mapping.bitmap().slice_at(0) - } + // The cfgs make using vec![...] instead more unreadable, so suppress the lint here. 
+ #[allow(clippy::vec_init_then_push)] + impl AnyRegion { + fn all_with_file(addr: GuestAddress, size: usize, f_off: &FileOffset) -> Vec { + let mut regions = Vec::new(); + #[cfg(all(windows, feature = "backend-mmap"))] + regions.push(AnyRegion::Windows( + crate::mmap_windows::GuestRegionWindows::new( + crate::mmap_windows::MmapRegion::from_file(f_off.clone(), size).unwrap(), + addr, + ) + .unwrap(), + )); + #[cfg(all(unix, feature = "backend-mmap"))] + regions.push(AnyRegion::Mmap( + crate::mmap::unix::GuestRegionMmap::new( + MmapRegion::from_file(f_off.clone(), size).unwrap(), + addr, + ) + .unwrap(), + )); + #[cfg(all(unix, feature = "backend-mmap", feature = "xen"))] + regions.push(AnyRegion::Xen( + crate::MmapRegionXen::from_range(crate::MmapRange::new_unix( + size, + Some(f_off.clone()), + addr, + )) + .unwrap(), + )); + regions + } - fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { - // Not sure why wrapping_offset is not unsafe. Anyway this - // is safe because we've just range-checked addr using check_address. 
- self.check_address(addr) - .ok_or(guest_memory::Error::InvalidBackendAddress) - .map(|addr| { - self.mapping - .as_ptr() - .wrapping_offset(addr.raw_value() as isize) - }) - } + fn all(addr: GuestAddress, size: usize, file: &Option) -> Vec { + let mut regions = if let Some(file) = file { + Self::all_with_file(addr, size, file) + } else { + Vec::new() + }; + #[cfg(all(windows, feature = "backend-mmap"))] + regions.push(AnyRegion::Windows( + crate::mmap_windows::GuestRegionWindows::new( + crate::mmap::windows::MmapRegion::new(size).unwrap(), + addr, + ) + .unwrap(), + )); + #[cfg(all(unix, feature = "backend-mmap"))] + regions.push(AnyRegion::Mmap( + crate::mmap::unix::GuestRegionMmap::new(MmapRegion::new(size).unwrap(), addr) + .unwrap(), + )); + #[cfg(all(unix, feature = "backend-mmap", feature = "xen"))] + regions.push(AnyRegion::Xen( + crate::MmapRegionXen::from_range(crate::MmapRange::new_unix(size, None, addr)) + .unwrap(), + )); + regions + } - fn file_offset(&self) -> Option<&FileOffset> { - self.mapping.file_offset() + fn as_ptr(&self) -> *mut u8 { + self.get_host_address(MemoryRegionAddress(0)).unwrap() + } } - fn get_slice( - &self, - offset: MemoryRegionAddress, - count: usize, - ) -> guest_memory::Result>> { - let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?; - Ok(slice) + fn transpose(striped: Vec>) -> Vec { + assert!(!striped.is_empty(), "No test cases found!"); + + let mut backends = (0..striped[0].len()) + .map(|_| AnyBackend::default()) + .collect::>(); + for stripe in striped { + backends + .iter_mut() + .zip(stripe) + .for_each(|(backend, region)| { + *backend = backend.insert_region(Arc::new(region)).unwrap(); + }); + } + backends } - #[cfg(target_os = "linux")] - fn is_hugetlbfs(&self) -> Option { - self.mapping.is_hugetlbfs() - } -} + impl AnyBackend { + fn all_with_file(regions: &[(GuestAddress, usize, FileOffset)]) -> Vec { + let striped = regions + .iter() + .map(|(addr, size, file)| AnyRegion::all_with_file(*addr, 
*size, file)) + .collect::>(); -impl GuestMemoryRegionBytes for GuestRegionMmap {} - -/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory -/// in the current process. -/// -/// Represents the entire physical memory of the guest by tracking all its memory regions. -/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the -/// virtual address space of the calling process. -pub type GuestMemoryMmap = GuestRegionCollection>; - -/// Errors that can happen during [`GuestMemoryMap::from_ranges`] and related functions. -#[derive(Debug, thiserror::Error)] -pub enum FromRangesError { - /// Error during construction of [`GuestMemoryMmap`] - #[error("Error constructing guest region collection: {0}")] - Collection(#[from] GuestRegionCollectionError), - /// Error while allocating raw mmap region - #[error("Error setting up raw memory for guest region: {0}")] - MmapRegion(#[from] MmapRegionError), - /// A combination of region length and guest address would overflow. - #[error("Combination of guest address and region length invalid (would overflow)")] - InvalidGuestRegion, -} + transpose(striped) + } -impl GuestMemoryMmap { - /// Creates a container and allocates anonymous memory for guest memory regions. - /// - /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. - pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { - Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None))) - } + pub(crate) fn all( + regions: &[(GuestAddress, usize, Option)], + ) -> Vec { + let striped = regions + .iter() + .map(|(addr, size, file)| AnyRegion::all(*addr, *size, file)) + .collect::>(); - /// Creates a container and allocates anonymous memory for guest memory regions. - /// - /// Valid memory regions are specified as a sequence of (Address, Size, [`Option`]) - /// tuples sorted by Address. 
- pub fn from_ranges_with_files(ranges: T) -> result::Result - where - A: Borrow<(GuestAddress, usize, Option)>, - T: IntoIterator, - { - Self::from_regions( - ranges - .into_iter() - .map(|x| { - GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone()) - }) - .collect::, _>>()?, - ) - .map_err(Into::into) + transpose(striped) + } } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - extern crate vmm_sys_util; - - use super::*; - - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::AtomicBitmap; - use crate::{Bytes, GuestMemory, GuestMemoryError}; - - use std::io::Write; - #[cfg(feature = "rawfd")] - use std::{fs::File, path::Path}; - use vmm_sys_util::tempfile::TempFile; - - type GuestRegionMmap = super::GuestRegionMmap<()>; - type GuestMemoryMmap = super::GuestRegionCollection; - type MmapRegion = super::MmapRegion<()>; #[test] fn basic_map() { - let m = MmapRegion::new(1024).unwrap(); - assert_eq!(1024, m.size()); + for m in AnyRegion::all(GuestAddress(0), 1024, &None) { + assert_eq!(1024, m.len()); + } } #[test] fn slice_addr() { - let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap(); - let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap(); - let guard = s.ptr_guard(); - assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) }); + for m in AnyRegion::all(GuestAddress(0), 5, &None) { + let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap(); + let guard = s.ptr_guard(); + assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) }); + } } #[test] @@ -255,14 +240,15 @@ mod tests { let sample_buf = &[1, 2, 3, 4, 5]; assert!(f.write_all(sample_buf).is_ok()); - let file = Some(FileOffset::new(f, 0)); - let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap(); - let buf = &mut [0u8; 16]; - assert_eq!( - mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(), - sample_buf.len() - ); - assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); + let file = 
FileOffset::new(f, 0); + for mem_map in AnyRegion::all_with_file(GuestAddress(0), sample_buf.len(), &file) { + let buf = &mut [0u8; 16]; + assert_eq!( + mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(), + sample_buf.len() + ); + assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); + } } #[test] @@ -274,20 +260,14 @@ mod tests { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ + for guest_mem in AnyBackend::all(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { + ]) { assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none()); let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap(); let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap(); - assert!(r0.as_ptr() == r1.as_ptr()); + assert_eq!(r0.as_ptr(), r1.as_ptr()); assert_eq!(addr0, MemoryRegionAddress(0)); assert_eq!(addr1, MemoryRegionAddress(0x200)); } @@ -302,16 +282,10 @@ mod tests { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ + for guest_mem in AnyBackend::all(&[ (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { + ]) { assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err()); let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap(); let ptr1 = 
guest_mem.get_host_address(GuestAddress(0xa00)).unwrap(); @@ -329,16 +303,7 @@ mod tests { f.set_len(0x400).unwrap(); let start_addr = GuestAddress(0x0); - let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { + for guest_mem in AnyBackend::all(&[(start_addr, 0x400, Some(FileOffset::new(f, 0)))]) { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5); @@ -367,16 +332,10 @@ mod tests { let bad_addr2 = GuestAddress(0x1ffc); let max_addr = GuestAddress(0x2000); - let gm = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ + for gm in AnyBackend::all(&[ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { + ]) { let val1: u64 = 0xaa55_aa55_aa55_aa55; let val2: u64 = 0x55aa_55aa_55aa_55aa; assert!(matches!( @@ -402,16 +361,7 @@ mod tests { f.set_len(0x400).unwrap(); let mut start_addr = GuestAddress(0x1000); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { + for gm in AnyBackend::all(&[(start_addr, 0x400, Some(FileOffset::new(f, 0)))]) { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5); @@ -436,22 +386,9 @@ mod tests { let f = TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); - let gm = 
GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - GuestAddress(0x1000), - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { + for gm in AnyBackend::all(&[(GuestAddress(0x1000), 0x400, Some(FileOffset::new(f, 0)))]) { let addr = GuestAddress(0x1010); - let mut file = if cfg!(target_family = "unix") { - File::open(Path::new("/dev/zero")).unwrap() - } else { - File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() - }; + let mut file = File::open(Path::new("/dev/zero")).unwrap(); gm.write_obj(!0u32, addr).unwrap(); gm.read_exact_volatile_from(addr, &mut file, mem::size_of::()) .unwrap(); @@ -465,11 +402,7 @@ mod tests { let mut sink = vec![0; mem::size_of::()]; gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::()) .unwrap(); - if cfg!(target_family = "unix") { - assert_eq!(sink, vec![0; mem::size_of::()]); - } else { - assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); - }; + assert_eq!(sink, vec![0; mem::size_of::()]); } } @@ -482,16 +415,10 @@ mod tests { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let gm = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ + for gm in AnyBackend::all(&[ (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { + ]) { let sample_buf = &[1, 2, 3, 4, 5]; assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); let buf = &mut [0u8; 5]; @@ -506,20 +433,11 @@ mod tests { f.set_len(0x400).unwrap(); let start_addr = GuestAddress(0x0); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - 
assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_none()); - - let gm = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_some()); + for gm in AnyBackend::all_with_file(&[(start_addr, 0x400, FileOffset::new(f, 0))]) { + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_some()); + } } // Windows needs a dedicated test where it will retrieve the allocation @@ -535,110 +453,94 @@ mod tests { let offset = 0x1000; let start_addr = GuestAddress(0x0); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_none()); - - let gm = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, offset)), - )]) - .unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_some()); - assert_eq!(region.file_offset().unwrap().start(), offset); + + for gm in AnyBackend::all_with_file(&[(start_addr, 0x400, FileOffset::new(f, offset))]) { + assert!(gm.find_region(start_addr).is_some()); + let region = gm.find_region(start_addr).unwrap(); + assert!(region.file_offset().is_some()); + assert_eq!(region.file_offset().unwrap().start(), offset); + } } #[test] fn test_guest_memory_mmap_get_slice() { - let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap(); - - // Normal case. 
- let slice_addr = MemoryRegionAddress(0x100); - let slice_size = 0x200; - let slice = region.get_slice(slice_addr, slice_size).unwrap(); - assert_eq!(slice.len(), slice_size); - - // Empty slice. - let slice_addr = MemoryRegionAddress(0x200); - let slice_size = 0x0; - let slice = region.get_slice(slice_addr, slice_size).unwrap(); - assert!(slice.is_empty()); - - // Error case when slice_size is beyond the boundary. - let slice_addr = MemoryRegionAddress(0x300); - let slice_size = 0x200; - assert!(region.get_slice(slice_addr, slice_size).is_err()); + for region in AnyRegion::all(GuestAddress(0), 0x400, &None) { + // Normal case. + let slice_addr = MemoryRegionAddress(0x100); + let slice_size = 0x200; + let slice = region.get_slice(slice_addr, slice_size).unwrap(); + assert_eq!(slice.len(), slice_size); + + // Empty slice. + let slice_addr = MemoryRegionAddress(0x200); + let slice_size = 0x0; + let slice = region.get_slice(slice_addr, slice_size).unwrap(); + assert!(slice.is_empty()); + + // Error case when slice_size is beyond the boundary. + let slice_addr = MemoryRegionAddress(0x300); + let slice_size = 0x200; + assert!(region.get_slice(slice_addr, slice_size).is_err()); + } } #[test] fn test_guest_memory_mmap_as_volatile_slice() { let region_size = 0x400; - let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap(); - // Test slice length. - let slice = region.as_volatile_slice().unwrap(); - assert_eq!(slice.len(), region_size); + for region in AnyRegion::all(GuestAddress(0), region_size, &None) { + // Test slice length. + let slice = region.as_volatile_slice().unwrap(); + assert_eq!(slice.len(), region_size); - // Test slice data. - let v = 0x1234_5678u32; - let r = slice.get_ref::(0x200).unwrap(); - r.store(v); - assert_eq!(r.load(), v); + // Test slice data. 
+ let v = 0x1234_5678u32; + let r = slice.get_ref::(0x200).unwrap(); + r.store(v); + assert_eq!(r.load(), v); + } } #[test] fn test_guest_memory_get_slice() { let start_addr1 = GuestAddress(0); let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - - // Normal cases. - let slice_size = 0x200; - let slice = guest_mem - .get_slice(GuestAddress(0x100), slice_size) - .unwrap(); - assert_eq!(slice.len(), slice_size); - - let slice_size = 0x400; - let slice = guest_mem - .get_slice(GuestAddress(0x800), slice_size) - .unwrap(); - assert_eq!(slice.len(), slice_size); - - // Empty slice. - assert!(guest_mem - .get_slice(GuestAddress(0x900), 0) - .unwrap() - .is_empty()); - - // Error cases, wrong size or base address. - assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err()); - assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err()); - assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); - } + for guest_mem in AnyBackend::all(&[(start_addr1, 0x400, None), (start_addr2, 0x400, None)]) + { + // Normal cases. + let slice_size = 0x200; + let slice = guest_mem + .get_slice(GuestAddress(0x100), slice_size) + .unwrap(); + assert_eq!(slice.len(), slice_size); - #[test] - fn test_atomic_accesses() { - let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap(); + let slice_size = 0x400; + let slice = guest_mem + .get_slice(GuestAddress(0x800), slice_size) + .unwrap(); + assert_eq!(slice.len(), slice_size); + + // Empty slice. + assert!(guest_mem + .get_slice(GuestAddress(0x900), 0) + .unwrap() + .is_empty()); - crate::bytes::tests::check_atomic_accesses( - region, - MemoryRegionAddress(0), - MemoryRegionAddress(0x1000), - ); + // Error cases, wrong size or base address. 
+ assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err()); + assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err()); + assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); + } } #[test] - #[cfg(feature = "backend-bitmap")] - fn test_dirty_tracking() { - crate::bitmap::tests::test_guest_memory_and_region(|| { - crate::GuestMemoryMmap::::from_ranges(&[(GuestAddress(0), 0x1_0000)]) - .unwrap() - }); + fn test_atomic_accesses() { + for region in AnyRegion::all(GuestAddress(0), 0x1000, &None) { + crate::bytes::tests::check_atomic_accesses( + region, + MemoryRegionAddress(0), + MemoryRegionAddress(0x1000), + ); + } } } diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs index 7756b428..280b79bc 100644 --- a/src/mmap/unix.rs +++ b/src/mmap/unix.rs @@ -10,7 +10,9 @@ //! Helper structure for working with mmaped memory regions in Unix. +use std::borrow::Borrow; use std::io; +use std::ops::Deref; use std::os::unix::io::AsRawFd; use std::ptr::null_mut; use std::result; @@ -18,6 +20,10 @@ use std::result; use crate::bitmap::{Bitmap, NewBitmap, BS}; use crate::guest_memory::FileOffset; use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; +use crate::{ + guest_memory, Address, GuestAddress, GuestMemoryRegion, GuestMemoryRegionBytes, + GuestRegionCollection, GuestRegionCollectionError, GuestUsize, MemoryRegionAddress, +}; /// Error conditions that may arise when creating a new `MmapRegion` object. #[derive(Debug, thiserror::Error)] @@ -211,7 +217,7 @@ impl MmapRegionBuilder { /// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's /// physical memory may be mapped into the current process due to the limited virtual address /// space size of the process. 
-#[derive(Debug)] +#[derive(Clone, Debug)] pub struct MmapRegion { addr: *mut u8, size: usize, @@ -432,6 +438,154 @@ impl Drop for MmapRegion { } } +/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's +/// memory region in the current process. +/// +/// Represents a continuous region of the guest's physical memory that is backed by a mapping +/// in the virtual address space of the calling process. +#[derive(Debug)] +pub struct GuestRegionMmap { + mapping: MmapRegion, + guest_base: GuestAddress, +} + +impl Deref for GuestRegionMmap { + type Target = MmapRegion; + + fn deref(&self) -> &MmapRegion { + &self.mapping + } +} + +impl GuestRegionMmap { + /// Create a new memory-mapped memory region for the guest's physical memory. + pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Option { + guest_base + .0 + .checked_add(mapping.size() as u64) + .map(|_| GuestRegionMmap { + mapping, + guest_base, + }) + } +} + +impl GuestRegionMmap { + /// Create a new memory-mapped memory region from guest's physical memory, size and file. + pub fn from_range( + addr: GuestAddress, + size: usize, + file: Option, + ) -> result::Result { + let region = if let Some(f_off) = file { + MmapRegion::from_file(f_off, size)? + } else { + MmapRegion::new(size)? + }; + + Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) + } +} + +impl GuestMemoryRegion for GuestRegionMmap { + type B = B; + + fn len(&self) -> GuestUsize { + self.mapping.size() as GuestUsize + } + + fn start_addr(&self) -> GuestAddress { + self.guest_base + } + + fn bitmap(&self) -> BS<'_, Self::B> { + self.mapping.bitmap().slice_at(0) + } + + fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { + // Not sure why wrapping_offset is not unsafe. Anyway this + // is safe because we've just range-checked addr using check_address. 
+ self.check_address(addr) + .ok_or(guest_memory::Error::InvalidBackendAddress) + .map(|addr| { + self.mapping + .as_ptr() + .wrapping_offset(addr.raw_value() as isize) + }) + } + + fn file_offset(&self) -> Option<&FileOffset> { + self.mapping.file_offset() + } + + fn get_slice( + &self, + offset: MemoryRegionAddress, + count: usize, + ) -> guest_memory::Result>> { + let slice = VolatileMemory::get_slice(&self.mapping, offset.raw_value() as usize, count)?; + Ok(slice) + } + + #[cfg(target_os = "linux")] + fn is_hugetlbfs(&self) -> Option { + self.mapping.is_hugetlbfs() + } +} + +impl GuestMemoryRegionBytes for GuestRegionMmap {} + +/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory +/// in the current process. +/// +/// Represents the entire physical memory of the guest by tracking all its memory regions. +/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the +/// virtual address space of the calling process. +pub type GuestMemoryMmap = GuestRegionCollection>; + +/// Errors that can happen during [`GuestMemoryMap::from_ranges`] and related functions. +#[derive(Debug, thiserror::Error)] +pub enum FromRangesError { + /// Error during construction of [`GuestMemoryMmap`] + #[error("Error constructing guest region collection: {0}")] + Collection(#[from] GuestRegionCollectionError), + /// Error while allocating raw mmap region + #[error("Error setting up raw memory for guest region: {0}")] + MmapRegion(#[from] Error), + /// A combination of region length and guest address would overflow. + #[error("Combination of guest address and region length invalid (would overflow)")] + InvalidGuestRegion, +} + +impl GuestMemoryMmap { + /// Creates a container and allocates anonymous memory for guest memory regions. + /// + /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. 
+ pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { + Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None))) + } + + /// Creates a container and allocates anonymous memory for guest memory regions. + /// + /// Valid memory regions are specified as a sequence of (Address, Size, [`Option`]) + /// tuples sorted by Address. + pub fn from_ranges_with_files(ranges: T) -> result::Result + where + A: Borrow<(GuestAddress, usize, Option)>, + T: IntoIterator, + { + Self::from_regions( + ranges + .into_iter() + .map(|x| { + GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone()) + }) + .collect::, _>>()?, + ) + .map_err(Into::into) + } +} + #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] @@ -651,11 +805,16 @@ mod tests { } #[test] - #[cfg(feature = "backend-bitmap")] + #[cfg(all(feature = "backend-bitmap", target_family = "unix"))] fn test_dirty_tracking() { // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for // the rest of the unit tests above. let m = crate::MmapRegion::::new(0x1_0000).unwrap(); crate::bitmap::tests::test_volatile_memory(&m); + + crate::bitmap::tests::test_guest_memory_and_region(|| { + crate::GuestMemoryMmap::::from_ranges(&[(GuestAddress(0), 0x1_0000)]) + .unwrap() + }); } } diff --git a/src/mmap/windows.rs b/src/mmap/windows.rs index 571fa7a2..5e465318 100644 --- a/src/mmap/windows.rs +++ b/src/mmap/windows.rs @@ -4,17 +4,23 @@ //! Helper structure for working with mmaped memory regions in Windows. 
use std; -use std::io; use std::os::windows::io::{AsRawHandle, RawHandle}; use std::ptr::{null, null_mut}; +use std::{io, result}; use libc::{c_void, size_t}; use winapi::um::errhandlingapi::GetLastError; +use crate::address::Address; use crate::bitmap::{Bitmap, NewBitmap, BS}; use crate::guest_memory::FileOffset; +use crate::region::GuestRegionError; use crate::volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice}; +use crate::{ + guest_memory, GuestAddress, GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, + GuestUsize, MemoryRegionAddress, +}; #[allow(non_snake_case)] #[link(name = "kernel32")] @@ -241,6 +247,85 @@ impl Drop for MmapRegion { } } +/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's +/// memory region in the current process. +/// +/// Represents a continuous region of the guest's physical memory that is backed by a mapping +/// in the virtual address space of the calling process. +#[derive(Debug)] +pub struct GuestRegionWindows { + mapping: MmapRegion, + guest_base: GuestAddress, +} + +impl GuestRegionWindows { + /// Create a new memory-mapped memory region for the guest's physical memory. + pub fn new( + mapping: MmapRegion, + guest_base: GuestAddress, + ) -> result::Result { + if guest_base.0.checked_add(mapping.size() as u64).is_none() { + return Err(GuestRegionError::InvalidGuestRegion); + } + + Ok(GuestRegionWindows { + mapping, + guest_base, + }) + } +} + +impl GuestMemoryRegion for GuestRegionWindows { + type B = B; + + fn len(&self) -> GuestUsize { + self.mapping.size() as GuestUsize + } + + fn start_addr(&self) -> GuestAddress { + self.guest_base + } + + fn bitmap(&self) -> &Self::B { + self.mapping.bitmap() + } + + fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { + // Not sure why wrapping_offset is not unsafe. Anyway this + // is safe because we've just range-checked addr using check_address. 
+ self.check_address(addr) + .ok_or(guest_memory::Error::InvalidBackendAddress) + .map(|addr| { + self.mapping + .as_ptr() + .wrapping_offset(addr.raw_value() as isize) + }) + } + + fn file_offset(&self) -> Option<&FileOffset> { + self.mapping.file_offset() + } + + fn get_slice( + &self, + offset: MemoryRegionAddress, + count: usize, + ) -> guest_memory::Result>> { + let slice = VolatileMemory::get_slice(&self.mapping, offset.raw_value() as usize, count)?; + Ok(slice) + } +} + +impl GuestMemoryRegionBytes for GuestRegionWindows {} + +/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory +/// in the current process. +/// +/// Represents the entire physical memory of the guest by tracking all its memory regions. +/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the +/// virtual address space of the calling process. +pub type GuestMemoryWindows = GuestRegionCollection>; + #[cfg(test)] mod tests { use std::os::windows::io::FromRawHandle; diff --git a/src/mmap/xen.rs b/src/mmap/xen.rs index f794aa11..a5058435 100644 --- a/src/mmap/xen.rs +++ b/src/mmap/xen.rs @@ -9,8 +9,8 @@ use bitflags::bitflags; use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE}; +use std::sync::Arc; use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result}; - use vmm_sys_util::{ fam::{Error as FamError, FamStruct, FamStructWrapper}, generate_fam_struct_impl, @@ -26,24 +26,23 @@ use tests::ioctl_with_ref; use crate::bitmap::{Bitmap, NewBitmap, BS}; use crate::guest_memory::{FileOffset, GuestAddress}; +use crate::mmap::unix::MmapRegion as MmapUnix; use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; +use crate::{ + guest_memory, Address, GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, + GuestUsize, MemoryRegionAddress, MmapRegionBuilder, +}; /// Error conditions that may arise when creating a new `MmapRegion` object. 
#[derive(Debug, thiserror::Error)] pub enum Error { - /// The specified file offset and length cause overflow when added. - #[error("The specified file offset and length cause overflow when added")] - InvalidOffsetLength, /// The forbidden `MAP_FIXED` flag was specified. #[error("The forbidden `MAP_FIXED` flag was specified")] MapFixed, - /// A mapping with offset + length > EOF was attempted. - #[error("The specified file offset and length is greater then file length")] - MappingPastEof, /// The `mmap` call returned an error. #[error("{0}")] Mmap(io::Error), - /// Invalid file offset. + /// Invalid file offset (non-zero of missing altogether). #[error("Invalid file offset")] InvalidFileOffset, /// Memory mapped in advance. @@ -57,7 +56,10 @@ pub enum Error { Fam(FamError), /// Unexpected error. #[error("Unexpected error")] - UnexpectedError, + Unexpected, + /// Error establishing normal unix mapping + #[error["{0}"]] + Unix(#[from] crate::mmap::unix::Error), } type Result = result::Result; @@ -151,6 +153,56 @@ pub struct MmapRegion { mmap: MmapXen, } +impl GuestMemoryRegion for MmapRegion { + type B = B; + + fn len(&self) -> GuestUsize { + self.size as GuestUsize + } + + fn start_addr(&self) -> GuestAddress { + self.mmap.mmap.guest_base() + } + + fn bitmap(&self) -> BS<'_, Self::B> { + self.bitmap.slice_at(0) + } + + // TODO: MmapRegion::as_ptr states that it should only be used for passing pointers to ioctls. Should this function then just remain the default implementation of returning Err(InvalidHostAddress)? 
+ fn get_host_address(&self, addr: MemoryRegionAddress) -> crate::guest_memory::Result<*mut u8> { + self.check_address(addr) + .ok_or(guest_memory::Error::InvalidBackendAddress) + .map(|addr| self.as_ptr().wrapping_offset(addr.raw_value() as isize)) + } + + fn file_offset(&self) -> Option<&FileOffset> { + self.file_offset.as_ref() + } + + fn get_slice( + &self, + offset: MemoryRegionAddress, + count: usize, + ) -> crate::guest_memory::Result>> { + VolatileMemory::get_slice(self, offset.raw_value() as usize, count).map_err(Into::into) + } + + // TODO: does this make sense in the context of Xen, or should it just return None, as the default implementation does? + // (and if running on Xen, will target_os="linux" even be true?) + #[cfg(target_os = "linux")] + fn is_hugetlbfs(&self) -> Option { + self.hugetlbfs + } +} + +impl GuestMemoryRegionBytes for MmapRegion {} + +/// A collection of Xen guest memory regions. +/// +/// Represents the entire physical memory of the guest by tracking all its memory regions. +/// Each region is an instance of [`MmapRegionXen`]. +pub type GuestMemoryXen = GuestRegionCollection>; + // SAFETY: Send and Sync aren't automatically inherited for the raw address pointer. // Accessing that pointer is only done through the stateless interface which // allows the object to be shared by multiple threads without a decrease in @@ -172,8 +224,7 @@ impl MmapRegion { /// use std::fs::File; /// use std::path::Path; /// use vm_memory::{ - /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, - /// MmapXenFlags, + /// Bytes, FileOffset, GuestAddress, GuestMemoryXen, MmapRange, MmapRegionXen, MmapXenFlags, /// }; /// # use vmm_sys_util::tempfile::TempFile; /// @@ -189,13 +240,9 @@ impl MmapRegion { /// # // We need a UNIX mapping for tests to succeed. 
/// # let range = MmapRange::new_unix(0x400, None, addr); /// - /// let r = GuestRegionMmap::new( - /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), - /// addr, - /// ) - /// .expect("Could not create guest region"); + /// let r = MmapRegionXen::<()>::from_range(range).expect("Could not create mmap region"); /// - /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); + /// let mut gm = GuestMemoryXen::from_regions(vec![r]).expect("Could not create guest memory"); /// let res = gm /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) /// .expect("Could not write to guest memory"); @@ -208,8 +255,7 @@ impl MmapRegion { /// use std::fs::File; /// use std::path::Path; /// use vm_memory::{ - /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, - /// MmapXenFlags, + /// Bytes, FileOffset, GuestAddress, GuestMemoryXen, MmapRange, MmapRegionXen, MmapXenFlags, /// }; /// # use vmm_sys_util::tempfile::TempFile; /// @@ -225,13 +271,9 @@ impl MmapRegion { /// # // We need a UNIX mapping for tests to succeed. 
/// # let range = MmapRange::new_unix(0x400, None, addr); /// - /// let r = GuestRegionMmap::new( - /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), - /// addr, - /// ) - /// .expect("Could not create guest region"); + /// let r = MmapRegionXen::<()>::from_range(range).expect("Could not create mmap region"); /// - /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); + /// let mut gm = GuestMemoryXen::from_regions(vec![r]).expect("Could not create guest memory"); /// let res = gm /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) /// .expect("Could not write to guest memory"); @@ -258,8 +300,8 @@ impl MmapRegion { Ok(MmapRegion { bitmap: B::with_len(range.size), size: range.size, - prot: range.prot.ok_or(Error::UnexpectedError)?, - flags: range.flags.ok_or(Error::UnexpectedError)?, + prot: range.prot.ok_or(Error::Unexpected)?, + flags: range.flags.ok_or(Error::Unexpected)?, file_offset: range.file_offset, hugetlbfs: range.hugetlbfs, mmap, @@ -308,8 +350,8 @@ impl MmapRegion { if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { let s1 = f_off1.start(); let s2 = f_off2.start(); - let l1 = self.len() as u64; - let l2 = other.len() as u64; + let l1 = self.size as u64; + let l2 = other.size as u64; if s1 < s2 { return s1 + l1 > s2; @@ -383,45 +425,6 @@ impl VolatileMemory for MmapRegion { } } -#[derive(Clone, Debug, PartialEq)] -struct MmapUnix { - addr: *mut u8, - size: usize, -} - -impl MmapUnix { - fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result { - let addr = - // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters - // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or - // some wacky file). 
- unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) }; - - if addr == libc::MAP_FAILED { - return Err(Error::Mmap(io::Error::last_os_error())); - } - - Ok(Self { - addr: addr as *mut u8, - size, - }) - } - - fn addr(&self) -> *mut u8 { - self.addr - } -} - -impl Drop for MmapUnix { - fn drop(&mut self) { - // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody - // else is holding a reference to it. - unsafe { - libc::munmap(self.addr as *mut libc::c_void, self.size); - } - } -} - // Bit mask for the vhost-user xen mmap message. bitflags! { /// Flags for the Xen mmap message. @@ -487,48 +490,44 @@ fn pages(size: usize) -> (usize, usize) { (num, page_size * num) } -fn validate_file(file_offset: &Option) -> Result<(i32, u64)> { +fn validate_file(file_offset: Option) -> Result { let file_offset = match file_offset { Some(f) => f, None => return Err(Error::InvalidFileOffset), }; - let fd = file_offset.file().as_raw_fd(); - let f_offset = file_offset.start(); - // We don't allow file offsets with Xen foreign mappings. - if f_offset != 0 { - return Err(Error::InvalidOffsetLength); + if file_offset.start() != 0 { + return Err(Error::InvalidFileOffset); } - Ok((fd, f_offset)) + Ok(file_offset) } // Xen Foreign memory mapping interface. trait MmapXenTrait: std::fmt::Debug { fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result; fn addr(&self) -> *mut u8; + fn guest_base(&self) -> GuestAddress; } // Standard Unix memory mapping for testing other crates. -#[derive(Clone, Debug, PartialEq)] -struct MmapXenUnix(MmapUnix); +#[derive(Clone, Debug)] +struct MmapXenUnix(MmapUnix, GuestAddress); impl MmapXenUnix { fn new(range: &MmapRange) -> Result { - let (fd, offset) = if let Some(ref f_off) = range.file_offset { - (f_off.file().as_raw_fd(), f_off.start()) - } else { - (-1, 0) - }; + let mut builder = MmapRegionBuilder::new(range.size) + .with_mmap_prot(range.prot.ok_or(Error::Unexpected)?) 
+ .with_mmap_flags(range.flags.ok_or(Error::Unexpected)?); - Ok(Self(MmapUnix::new( - range.size, - range.prot.ok_or(Error::UnexpectedError)?, - range.flags.ok_or(Error::UnexpectedError)?, - fd, - offset, - )?)) + if let Some(ref file_offset) = range.file_offset { + builder = builder.with_file_offset(file_offset.clone()); + } + + let mmap_unix = builder.build()?; + + Ok(MmapXenUnix(mmap_unix, range.addr)) } } @@ -539,7 +538,11 @@ impl MmapXenTrait for MmapXenUnix { } fn addr(&self) -> *mut u8 { - self.0.addr() + self.0.as_ptr() + } + + fn guest_base(&self) -> GuestAddress { + self.1 } } @@ -574,7 +577,7 @@ fn ioctl_privcmd_mmapbatch_v2() -> c_ulong { } // Xen foreign memory specific implementation. -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug)] struct MmapXenForeign { domid: u32, guest_base: GuestAddress, @@ -590,16 +593,15 @@ impl AsRawFd for MmapXenForeign { impl MmapXenForeign { fn new(range: &MmapRange) -> Result { - let (fd, f_offset) = validate_file(&range.file_offset)?; let (count, size) = pages(range.size); + let file_offset = validate_file(range.file_offset.clone())?; + let fd = file_offset.file().as_raw_fd(); - let unix_mmap = MmapUnix::new( - size, - range.prot.ok_or(Error::UnexpectedError)?, - range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED, - fd, - f_offset, - )?; + let unix_mmap = MmapRegionBuilder::new(size) + .with_mmap_prot(range.prot.ok_or(Error::Unexpected)?) + .with_mmap_flags(range.flags.ok_or(Error::Unexpected)? 
| MAP_SHARED) + .with_file_offset(file_offset) + .build()?; let foreign = Self { domid: range.mmap_data, @@ -649,7 +651,11 @@ impl MmapXenTrait for MmapXenForeign { } fn addr(&self) -> *mut u8 { - self.unix_mmap.addr() + self.unix_mmap.as_ptr() + } + + fn guest_base(&self) -> GuestAddress { + self.guest_base } } @@ -799,13 +805,13 @@ impl AsRawFd for MmapXenGrant { impl MmapXenGrant { fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result { - validate_file(&range.file_offset)?; + let file_offset = validate_file(range.file_offset.clone())?; let mut grant = Self { guest_base: range.addr, unix_mmap: None, - file_offset: range.file_offset.as_ref().unwrap().clone(), - flags: range.flags.ok_or(Error::UnexpectedError)?, + file_offset, + flags: range.flags.ok_or(Error::Unexpected)?, size: 0, index: 0, domid: range.mmap_data, @@ -814,11 +820,8 @@ impl MmapXenGrant { // Region can't be mapped in advance, partial mapping will be done later via // `MmapXenSlice`. if mmap_flags.mmap_in_advance() { - let (unix_mmap, index) = grant.mmap_range( - range.addr, - range.size, - range.prot.ok_or(Error::UnexpectedError)?, - )?; + let (unix_mmap, index) = + grant.mmap_range(range.addr, range.size, range.prot.ok_or(Error::Unexpected)?)?; grant.unix_mmap = Some(unix_mmap); grant.index = index; @@ -831,7 +834,15 @@ impl MmapXenGrant { fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> { let (count, size) = pages(size); let index = self.mmap_ioctl(addr, count)?; - let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?; + + let unix_mmap = MmapRegionBuilder::new(size) + .with_mmap_prot(prot) + .with_mmap_flags(self.flags) + .with_file_offset(FileOffset::from_arc( + Arc::clone(self.file_offset.arc()), + index, + )) + .build()?; Ok((unix_mmap, index)) } @@ -881,11 +892,15 @@ impl MmapXenTrait for MmapXenGrant { fn addr(&self) -> *mut u8 { if let Some(ref unix_mmap) = self.unix_mmap { - unix_mmap.addr() + unix_mmap.as_ptr() 
} else { null_mut() } } + + fn guest_base(&self) -> GuestAddress { + self.guest_base + } } impl Drop for MmapXenGrant { @@ -926,7 +941,7 @@ impl MmapXenSlice { let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?; // SAFETY: We have already mapped the range including offset. - let addr = unsafe { unix_mmap.addr().add(offset) }; + let addr = unsafe { unix_mmap.as_ptr().add(offset) }; Ok(Self { grant: Some(grant), @@ -1028,6 +1043,7 @@ mod tests { fn raw_os_error(&self) -> i32 { match self { Error::Mmap(e) => e.raw_os_error().unwrap(), + Error::Unix(crate::mmap::unix::Error::Mmap(e)) => e.raw_os_error().unwrap(), _ => i32::MIN, } } @@ -1116,17 +1132,17 @@ mod tests { range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)); range.prot = None; let r = MmapXenForeign::new(&range); - assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); + assert!(matches!(r.unwrap_err(), Error::Unexpected)); let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenForeign::new(&range); - assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); + assert!(matches!(r.unwrap_err(), Error::Unexpected)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenForeign::new(&range); - assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); + assert!(matches!(r.unwrap_err(), Error::InvalidFileOffset)); let mut range = MmapRange::initialized(true); range.size = 0; @@ -1148,7 +1164,7 @@ mod tests { let mut range = MmapRange::initialized(true); range.prot = None; let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); - assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); + assert!(matches!(r.unwrap_err(), Error::Unexpected)); let mut range = MmapRange::initialized(true); range.prot = None; @@ -1158,12 +1174,12 @@ mod tests { let mut range = MmapRange::initialized(true); range.flags = None; let r = 
MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); + assert!(matches!(r.unwrap_err(), Error::Unexpected)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); + assert!(matches!(r.unwrap_err(), Error::InvalidFileOffset)); let mut range = MmapRange::initialized(true); range.size = 0; diff --git a/src/region.rs b/src/region.rs index e716a629..afcad12b 100644 --- a/src/region.rs +++ b/src/region.rs @@ -124,7 +124,7 @@ pub trait GuestMemoryRegion: Bytes { /// # Examples (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; @@ -154,7 +154,7 @@ pub trait GuestMemoryRegion: Bytes { /// # Examples (uses the `backend-mmap` feature) /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; /// let addr = GuestAddress(0x1000); @@ -335,10 +335,10 @@ impl Bytes for R { /// * Write a slice at guest address 0x1200. 
/// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) @@ -361,10 +361,10 @@ impl Bytes for R { /// * Read a slice of length 16 at guestaddress 0x1200. /// /// ``` - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; /// # - /// # #[cfg(feature = "backend-mmap")] + /// # #[cfg(all(feature = "backend-mmap", target_family = "unix"))] /// # { /// # let start_addr = GuestAddress(0x1000); /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])