diff --git a/.buildkite/custom-tests.json b/.buildkite/custom-tests.json index 4c7b7895..6f581a4c 100644 --- a/.buildkite/custom-tests.json +++ b/.buildkite/custom-tests.json @@ -22,7 +22,7 @@ }, { "test_name": "miri", - "command": "RUST_BACKTRACE=1 MIRIFLAGS='-Zmiri-disable-isolation -Zmiri-backtrace=full' cargo +nightly miri test --features backend-mmap", + "command": "RUST_BACKTRACE=1 MIRIFLAGS='-Zmiri-disable-isolation -Zmiri-backtrace=full' cargo +nightly miri test --features backend-mmap,backend-bitmap", "platform": ["x86_64", "aarch64"] }, { diff --git a/Cargo.toml b/Cargo.toml index cfb08622..2a60e94e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,7 @@ autobenches = false [features] default = ["rawfd"] -backend-bitmap = [] +backend-bitmap = ["dep:libc"] backend-mmap = ["dep:libc"] backend-atomic = ["arc-swap"] rawfd = ["dep:libc"] diff --git a/src/bitmap/mod.rs b/src/bitmap/mod.rs index d2682a13..cf1555b2 100644 --- a/src/bitmap/mod.rs +++ b/src/bitmap/mod.rs @@ -6,14 +6,14 @@ //! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the //! global view for an entire `GuestMemory` object. -#[cfg(any(test, feature = "backend-bitmap"))] +#[cfg(feature = "backend-bitmap")] mod backend; use std::fmt::Debug; use crate::{GuestMemory, GuestMemoryRegion}; -#[cfg(any(test, feature = "backend-bitmap"))] +#[cfg(feature = "backend-bitmap")] pub use backend::{ArcSlice, AtomicBitmap, RefSlice}; /// Trait implemented by types that support creating `BitmapSlice` objects. 
@@ -117,12 +117,11 @@ pub type BS<'a, B> = >::S; pub type MS<'a, M> = BS<'a, <::R as GuestMemoryRegion>::B>; #[cfg(test)] +#[cfg(feature = "backend-bitmap")] pub(crate) mod tests { use super::*; - use std::marker::PhantomData; use std::mem::size_of_val; - use std::result::Result; use std::sync::atomic::Ordering; use crate::{Bytes, VolatileMemory}; @@ -165,68 +164,6 @@ pub(crate) mod tests { assert!(range_is_dirty(&s, 0, dirty_len)); } - #[derive(Debug)] - pub enum TestAccessError { - RangeCleanCheck, - RangeDirtyCheck, - } - - // A helper object that implements auxiliary operations for testing `Bytes` implementations - // in the context of dirty bitmap tracking. - struct BytesHelper { - check_range_fn: F, - address_fn: G, - phantom: PhantomData<*const M>, - } - - // `F` represents a closure the checks whether a specified range associated with the `Bytes` - // object that's being tested is marked as dirty or not (depending on the value of the last - // parameter). It has the following parameters: - // - A reference to a `Bytes` implementations that's subject to testing. - // - The offset of the range. - // - The length of the range. - // - Whether we are checking if the range is clean (when `true`) or marked as dirty. - // - // `G` represents a closure that translates an offset into an address value that's - // relevant for the `Bytes` implementation being tested. 
- impl BytesHelper - where - F: Fn(&M, usize, usize, bool) -> bool, - G: Fn(usize) -> A, - M: Bytes, - { - fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool { - (self.check_range_fn)(m, start, len, clean) - } - - fn address(&self, offset: usize) -> A { - (self.address_fn)(offset) - } - - fn test_access( - &self, - bytes: &M, - dirty_offset: usize, - dirty_len: usize, - op: Op, - ) -> Result<(), TestAccessError> - where - Op: Fn(&M, A), - { - if !self.check_range(bytes, dirty_offset, dirty_len, true) { - return Err(TestAccessError::RangeCleanCheck); - } - - op(bytes, self.address(dirty_offset)); - - if !self.check_range(bytes, dirty_offset, dirty_len, false) { - return Err(TestAccessError::RangeDirtyCheck); - } - - Ok(()) - } - } - // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment. // The `step` parameter represents the offset that's added the the current address after // performing each access. It provides finer grained control when testing tracking @@ -236,49 +173,46 @@ pub(crate) mod tests { where F: Fn(&M, usize, usize, bool) -> bool, G: Fn(usize) -> A, - A: Copy, M: Bytes, >::E: Debug, { const BUF_SIZE: usize = 1024; let buf = vec![1u8; 1024]; + let mut dirty_offset = 0x1000; let val = 1u64; - let h = BytesHelper { - check_range_fn, - address_fn, - phantom: PhantomData, - }; - - let mut dirty_offset = 0x1000; - // Test `write`. - h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { - assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE) - }) - .unwrap(); - dirty_offset += step; + assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); + assert_eq!( + bytes + .write(buf.as_slice(), address_fn(dirty_offset)) + .unwrap(), + BUF_SIZE + ); + assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); // Test `write_slice`. 
- h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| { - m.write_slice(buf.as_slice(), addr).unwrap() - }) - .unwrap(); dirty_offset += step; + assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); + bytes + .write_slice(buf.as_slice(), address_fn(dirty_offset)) + .unwrap(); + assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); // Test `write_obj`. - h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| { - m.write_obj(val, addr).unwrap() - }) - .unwrap(); dirty_offset += step; + assert!(check_range_fn(bytes, dirty_offset, size_of_val(&val), true)); + bytes.write_obj(val, address_fn(dirty_offset)).unwrap(); + assert!(check_range_fn(bytes, dirty_offset, size_of_val(&val), false)); // Test `store`. - h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| { - m.store(val, addr, Ordering::Relaxed).unwrap() - }) - .unwrap(); + dirty_offset += step; + assert!(check_range_fn(bytes, dirty_offset, size_of_val(&val), true)); + bytes + .store(val, address_fn(dirty_offset), Ordering::Relaxed) + .unwrap(); + assert!(check_range_fn(bytes, dirty_offset, size_of_val(&val), false)); } // This function and the next are currently conditionally compiled because we only use diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index f514b4e3..b5baaf8a 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -221,12 +221,11 @@ mod tests { use super::*; - use crate::bitmap::tests::test_guest_memory_and_region; + #[cfg(feature = "backend-bitmap")] use crate::bitmap::AtomicBitmap; use crate::{Bytes, GuestMemory, GuestMemoryError}; use std::io::Write; - use std::mem; #[cfg(feature = "rawfd")] use std::{fs::File, path::Path}; use vmm_sys_util::tempfile::TempFile; @@ -432,6 +431,8 @@ mod tests { #[test] #[cfg(feature = "rawfd")] fn read_to_and_write_from_mem() { + use std::mem; + let f = TempFile::new().unwrap().into_file(); f.set_len(0x400).unwrap(); @@ -633,8 +634,9 @@ mod tests { } #[test] + #[cfg(feature = "backend-bitmap")] fn test_dirty_tracking() { - test_guest_memory_and_region(|| { + 
crate::bitmap::tests::test_guest_memory_and_region(|| { crate::GuestMemoryMmap::::from_ranges(&[(GuestAddress(0), 0x1_0000)]) .unwrap() }); diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs index d130f62b..7756b428 100644 --- a/src/mmap/unix.rs +++ b/src/mmap/unix.rs @@ -438,11 +438,11 @@ mod tests { use super::*; use std::io::Write; - use std::num::NonZeroUsize; use std::slice; use std::sync::Arc; use vmm_sys_util::tempfile::TempFile; + #[cfg(feature = "backend-bitmap")] use crate::bitmap::AtomicBitmap; type MmapRegion = super::MmapRegion<()>; @@ -535,6 +535,7 @@ mod tests { #[test] #[cfg(not(miri))] // Miri cannot mmap files + #[cfg(feature = "backend-bitmap")] fn test_mmap_region_build() { let a = Arc::new(TempFile::new().unwrap().into_file()); @@ -586,7 +587,9 @@ mod tests { assert!(r.owned()); let region_size = 0x10_0000; - let bitmap = AtomicBitmap::new(region_size, unsafe { NonZeroUsize::new_unchecked(0x1000) }); + let bitmap = AtomicBitmap::new(region_size, unsafe { + std::num::NonZeroUsize::new_unchecked(0x1000) + }); let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap) .with_hugetlbfs(true) .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE); @@ -648,6 +651,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-bitmap")] fn test_dirty_tracking() { // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for // the rest of the unit tests above. 
diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index acff239b..db7accd1 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -1447,6 +1447,7 @@ mod tests { #[cfg(feature = "rawfd")] use std::fs::File; + #[cfg(feature = "backend-bitmap")] use std::mem::size_of_val; #[cfg(feature = "rawfd")] use std::path::Path; @@ -1455,15 +1456,19 @@ mod tests { use std::thread::spawn; use matches::assert_matches; + #[cfg(feature = "backend-bitmap")] use std::num::NonZeroUsize; #[cfg(feature = "rawfd")] use vmm_sys_util::tempfile::TempFile; + #[cfg(feature = "backend-bitmap")] use crate::bitmap::tests::{ check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory, }; + #[cfg(feature = "backend-bitmap")] use crate::bitmap::{AtomicBitmap, RefSlice}; + #[cfg(feature = "backend-bitmap")] const DEFAULT_PAGE_SIZE: NonZeroUsize = unsafe { NonZeroUsize::new_unchecked(0x1000) }; #[test] @@ -2080,6 +2085,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-bitmap")] fn test_volatile_slice_dirty_tracking() { let val = 123u64; let dirty_offset = 0x1000; @@ -2170,6 +2176,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-bitmap")] fn test_volatile_ref_dirty_tracking() { let val = 123u64; let mut buf = vec![val]; @@ -2184,6 +2191,7 @@ mod tests { assert!(range_is_dirty(vref.bitmap(), 0, vref.len())); } + #[cfg(feature = "backend-bitmap")] fn test_volatile_array_ref_copy_from_tracking( buf: &mut [T], index: usize, @@ -2210,6 +2218,7 @@ mod tests { } #[test] + #[cfg(feature = "backend-bitmap")] fn test_volatile_array_ref_dirty_tracking() { let val = 123u64; let dirty_len = size_of_val(&val);