From 15606ea7394b8752f38e94440d13dc900c377f9d Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 07:48:37 +0000 Subject: [PATCH 01/10] Add naive default impl for `GuestMemory::find_region()` This function can be default-implemented in terms of `GuestMemory::iter()`. Downstream impls can overwrite this more specialized and efficient versions of course (such as GuestMemoryMmap using a binary search). Mainly a QoL improvement to make mock impls of this trait for tests less verbose. Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 ++ src/guest_memory.rs | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f43bd83..71844838 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,8 @@ - \[[#307](https://github.com/rust-vmm/vm-memory/pull/304)\] Move `read_volatile_from`, `read_exact_volatile_from`, `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait. +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` a default implementation, + based on linear search. - \[#324](https:////github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is know, for example with `MmapRegion::bitmap()`. diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 2944169d..bd53facb 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -408,7 +408,9 @@ pub trait GuestMemory { fn num_regions(&self) -> usize; /// Returns the region containing the specified address or `None`. - fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>; + fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { + self.iter().find(|region| addr >= region.start_addr() && addr <= region.last_addr()) + } /// Gets an iterator over the entries in the collection. 
/// From c1f9a01c5fc605e38422a2c579ed827c5e6224d9 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Tue, 1 Jul 2025 10:35:04 +0100 Subject: [PATCH 02/10] add naive default impl for GuestMemory::num_regions Similar to find_region(), we can provide an inefficient default impl to make downstream impls of the trait less verbose in scenarios that really do not care (e.g. mock impls for tests). Signed-off-by: Patrick Roy --- CHANGELOG.md | 4 ++-- src/guest_memory.rs | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71844838..c07dc1f6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,8 +10,8 @@ - \[[#307](https://github.com/rust-vmm/vm-memory/pull/304)\] Move `read_volatile_from`, `read_exact_volatile_from`, `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait. -- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` a default implementation, - based on linear search. +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` and `GuestMemory::num_regions` + a default implementation, based on linear search. - \[#324](https:////github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is know, for example with `MmapRegion::bitmap()`. diff --git a/src/guest_memory.rs b/src/guest_memory.rs index bd53facb..b77472d5 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -405,7 +405,9 @@ pub trait GuestMemory { type R: GuestMemoryRegion; /// Returns the number of regions in the collection. - fn num_regions(&self) -> usize; + fn num_regions(&self) -> usize { + self.iter().count() + } /// Returns the region containing the specified address or `None`. 
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { From 8f197121eab2dcb45ba33b96a9b7886545d65557 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 12:13:44 +0000 Subject: [PATCH 03/10] introduce `region` module This modules is intended for all functionality that relates to contiguous regions of guest memory. This differentiates it from `guest_memory`, as that is about a holistic view of guest memory, and from `mmap`, which is specifically about guest memory regions backed by mmap VMAs. Signed-off-by: Patrick Roy --- src/guest_memory.rs | 139 ++----------------------------------------- src/lib.rs | 5 +- src/mmap/mod.rs | 3 +- src/region.rs | 141 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 151 insertions(+), 137 deletions(-) create mode 100644 src/region.rs diff --git a/src/guest_memory.rs b/src/guest_memory.rs index b77472d5..39e4f10a 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -50,10 +50,11 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use crate::address::{Address, AddressValue}; -use crate::bitmap::{Bitmap, BS, MS}; +use crate::bitmap::MS; use crate::bytes::{AtomicAccess, Bytes}; use crate::io::{ReadVolatile, WriteVolatile}; use crate::volatile_memory::{self, VolatileSlice}; +use crate::GuestMemoryRegion; /// Errors associated with handling guest memory accesses. #[allow(missing_docs)] @@ -158,139 +159,6 @@ impl FileOffset { } } -/// Represents a continuous region of guest physical memory. -#[allow(clippy::len_without_is_empty)] -pub trait GuestMemoryRegion: Bytes { - /// Type used for dirty memory tracking. - type B: Bitmap; - - /// Returns the size of the region. - fn len(&self) -> GuestUsize; - - /// Returns the minimum (inclusive) address managed by the region. - fn start_addr(&self) -> GuestAddress; - - /// Returns the maximum (inclusive) address managed by the region. 
- fn last_addr(&self) -> GuestAddress { - // unchecked_add is safe as the region bounds were checked when it was created. - self.start_addr().unchecked_add(self.len() - 1) - } - - /// Borrow the associated `Bitmap` object. - fn bitmap(&self) -> BS<'_, Self::B>; - - /// Returns the given address if it is within this region. - fn check_address(&self, addr: MemoryRegionAddress) -> Option { - if self.address_in_range(addr) { - Some(addr) - } else { - None - } - } - - /// Returns `true` if the given address is within this region. - fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { - addr.raw_value() < self.len() - } - - /// Returns the address plus the offset if it is in this region. - fn checked_offset( - &self, - base: MemoryRegionAddress, - offset: usize, - ) -> Option { - base.checked_add(offset as u64) - .and_then(|addr| self.check_address(addr)) - } - - /// Tries to convert an absolute address to a relative address within this region. - /// - /// Returns `None` if `addr` is out of the bounds of this region. - fn to_region_addr(&self, addr: GuestAddress) -> Option { - addr.checked_offset_from(self.start_addr()) - .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) - } - - /// Returns the host virtual address corresponding to the region address. - /// - /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, - /// have the capability to mmap guest address range into host virtual address space for - /// direct access, so the corresponding host virtual address may be passed to other subsystems. - /// - /// # Note - /// The underlying guest memory is not protected from memory aliasing, which breaks the - /// Rust memory safety model. It's the caller's responsibility to ensure that there's no - /// concurrent accesses to the underlying guest memory. 
- fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { - Err(Error::HostAddressNotAvailable) - } - - /// Returns information regarding the file and offset backing this memory region. - fn file_offset(&self) -> Option<&FileOffset> { - None - } - - /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at - /// `offset`. - #[allow(unused_variables)] - fn get_slice( - &self, - offset: MemoryRegionAddress, - count: usize, - ) -> Result>> { - Err(Error::HostAddressNotAvailable) - } - - /// Gets a slice of memory for the entire region that supports volatile access. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; - /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; - /// # - /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) - /// .expect("Could not create guest memory"); - /// let slice = region - /// .as_volatile_slice() - /// .expect("Could not get volatile slice"); - /// - /// let v = 42u32; - /// let r = slice - /// .get_ref::(0x200) - /// .expect("Could not get reference"); - /// r.store(v); - /// assert_eq!(r.load(), v); - /// # } - /// ``` - fn as_volatile_slice(&self) -> Result>> { - self.get_slice(MemoryRegionAddress(0), self.len() as usize) - } - - /// Show if the region is based on the `HugeTLBFS`. - /// Returns Some(true) if the region is backed by hugetlbfs. - /// None represents that no information is available. 
- /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; - /// let addr = GuestAddress(0x1000); - /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); - /// let r = mem.find_region(addr).unwrap(); - /// assert_eq!(r.is_hugetlbfs(), None); - /// # } - /// ``` - #[cfg(target_os = "linux")] - fn is_hugetlbfs(&self) -> Option { - None - } -} - /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object. /// The vm-memory crate already provides trivial implementation for /// references to `GuestMemory` or reference-counted `GuestMemory` objects, @@ -411,7 +279,8 @@ pub trait GuestMemory { /// Returns the region containing the specified address or `None`. fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { - self.iter().find(|region| addr >= region.start_addr() && addr <= region.last_addr()) + self.iter() + .find(|region| addr >= region.start_addr() && addr <= region.last_addr()) } /// Gets an iterator over the entries in the collection. 
diff --git a/src/lib.rs b/src/lib.rs index b8fe5f40..886602a3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -47,9 +47,12 @@ pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize}; pub mod guest_memory; pub use guest_memory::{ Error as GuestMemoryError, FileOffset, GuestAddress, GuestAddressSpace, GuestMemory, - GuestMemoryRegion, GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult, + GuestUsize, MemoryRegionAddress, Result as GuestMemoryResult, }; +pub mod region; +pub use region::GuestMemoryRegion; + pub mod io; pub use io::{ReadVolatile, WriteVolatile}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index b8dd0842..603be3c5 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -21,8 +21,9 @@ use std::sync::Arc; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{ - self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, + self, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, }; +use crate::region::GuestMemoryRegion; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; use crate::{AtomicAccess, Bytes, ReadVolatile, WriteVolatile}; diff --git a/src/region.rs b/src/region.rs new file mode 100644 index 00000000..35160bd8 --- /dev/null +++ b/src/region.rs @@ -0,0 +1,141 @@ +//! Module containing abstracts for dealing with contiguous regions of guest memory + +use crate::bitmap::{Bitmap, BS}; +use crate::guest_memory::Error; +use crate::guest_memory::Result; +use crate::{ + Address, Bytes, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress, VolatileSlice, +}; + +/// Represents a continuous region of guest physical memory. +#[allow(clippy::len_without_is_empty)] +pub trait GuestMemoryRegion: Bytes { + /// Type used for dirty memory tracking. + type B: Bitmap; + + /// Returns the size of the region. + fn len(&self) -> GuestUsize; + + /// Returns the minimum (inclusive) address managed by the region. 
+ fn start_addr(&self) -> GuestAddress; + + /// Returns the maximum (inclusive) address managed by the region. + fn last_addr(&self) -> GuestAddress { + // unchecked_add is safe as the region bounds were checked when it was created. + self.start_addr().unchecked_add(self.len() - 1) + } + + /// Borrow the associated `Bitmap` object. + fn bitmap(&self) -> BS<'_, Self::B>; + + /// Returns the given address if it is within this region. + fn check_address(&self, addr: MemoryRegionAddress) -> Option { + if self.address_in_range(addr) { + Some(addr) + } else { + None + } + } + + /// Returns `true` if the given address is within this region. + fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { + addr.raw_value() < self.len() + } + + /// Returns the address plus the offset if it is in this region. + fn checked_offset( + &self, + base: MemoryRegionAddress, + offset: usize, + ) -> Option { + base.checked_add(offset as u64) + .and_then(|addr| self.check_address(addr)) + } + + /// Tries to convert an absolute address to a relative address within this region. + /// + /// Returns `None` if `addr` is out of the bounds of this region. + fn to_region_addr(&self, addr: GuestAddress) -> Option { + addr.checked_offset_from(self.start_addr()) + .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) + } + + /// Returns the host virtual address corresponding to the region address. + /// + /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, + /// have the capability to mmap guest address range into host virtual address space for + /// direct access, so the corresponding host virtual address may be passed to other subsystems. + /// + /// # Note + /// The underlying guest memory is not protected from memory aliasing, which breaks the + /// Rust memory safety model. It's the caller's responsibility to ensure that there's no + /// concurrent accesses to the underlying guest memory. 
+ fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { + Err(Error::HostAddressNotAvailable) + } + + /// Returns information regarding the file and offset backing this memory region. + fn file_offset(&self) -> Option<&FileOffset> { + None + } + + /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at + /// `offset`. + #[allow(unused_variables)] + fn get_slice( + &self, + offset: MemoryRegionAddress, + count: usize, + ) -> Result>> { + Err(Error::HostAddressNotAvailable) + } + + /// Gets a slice of memory for the entire region that supports volatile access. + /// + /// # Examples (uses the `backend-mmap` feature) + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; + /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; + /// # + /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) + /// .expect("Could not create guest memory"); + /// let slice = region + /// .as_volatile_slice() + /// .expect("Could not get volatile slice"); + /// + /// let v = 42u32; + /// let r = slice + /// .get_ref::(0x200) + /// .expect("Could not get reference"); + /// r.store(v); + /// assert_eq!(r.load(), v); + /// # } + /// ``` + fn as_volatile_slice(&self) -> Result>> { + self.get_slice(MemoryRegionAddress(0), self.len() as usize) + } + + /// Show if the region is based on the `HugeTLBFS`. + /// Returns Some(true) if the region is backed by hugetlbfs. + /// None represents that no information is available. 
+ /// + /// # Examples (uses the `backend-mmap` feature) + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; + /// let addr = GuestAddress(0x1000); + /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); + /// let r = mem.find_region(addr).unwrap(); + /// assert_eq!(r.is_hugetlbfs(), None); + /// # } + /// ``` + #[cfg(target_os = "linux")] + fn is_hugetlbfs(&self) -> Option { + None + } +} From 454a0def11f6099e853e7af79aa60aca1850fe77 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 11:29:53 +0000 Subject: [PATCH 04/10] Generalize GuestMemoryMmap to arbitrary GuestMemoryRegions Add the concept of a `GuestRegionCollection`, which just manages a list of some `GuestMemoryRegion` impls. Functionality wise, it offers the same implementations as `GuestMemoryMmap`. As a result, turn `GuestMemoryMmap` into a type alias for `GuestRegionCollection` with a fixed `R = GuestRegionMmap`. Error types get cleaned up in a follow up commit, the messy state of GuestRegionError containing both collection and region constrution specific errors is transitory. Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 + src/lib.rs | 4 +- src/mmap/mod.rs | 181 +++++++----------------------------------------- src/region.rs | 162 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 190 insertions(+), 159 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c07dc1f6..9b905592 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ ### Added - \[[#311](https://github.com/rust-vmm/vm-memory/pull/311)\] Allow compiling without the ReadVolatile and WriteVolatile implementations +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\] `GuestRegionContainer`, a generic container of `GuestMemoryRegion`s, generalizing `GuestMemoryMmap` (which + is now a type alias for `GuestRegionContainer`). 
### Changed diff --git a/src/lib.rs b/src/lib.rs index 886602a3..d89f8459 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,7 +51,7 @@ pub use guest_memory::{ }; pub mod region; -pub use region::GuestMemoryRegion; +pub use region::{GuestMemoryRegion, GuestRegionCollection, GuestRegionError as Error}; pub mod io; pub use io::{ReadVolatile, WriteVolatile}; @@ -60,7 +60,7 @@ pub use io::{ReadVolatile, WriteVolatile}; pub mod mmap; #[cfg(feature = "backend-mmap")] -pub use mmap::{Error, GuestMemoryMmap, GuestRegionMmap, MmapRegion}; +pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapRegion}; #[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))] pub use mmap::{MmapRange, MmapXenFlags}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 603be3c5..99139135 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -16,16 +16,13 @@ use std::borrow::Borrow; use std::ops::Deref; use std::result; use std::sync::atomic::Ordering; -use std::sync::Arc; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::{ - self, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, -}; +use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; use crate::region::GuestMemoryRegion; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; -use crate::{AtomicAccess, Bytes, ReadVolatile, WriteVolatile}; +use crate::{AtomicAccess, Bytes, Error, GuestRegionCollection, ReadVolatile, WriteVolatile}; // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; @@ -50,27 +47,6 @@ pub use std::io::Error as MmapRegionError; #[cfg(target_family = "windows")] pub use windows::MmapRegion; -/// Errors that can occur when creating a memory map. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// Adding the guest base address to the length of the underlying mapping resulted - /// in an overflow. 
- #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] - InvalidGuestRegion, - /// Error creating a `MmapRegion` object. - #[error("{0}")] - MmapRegion(MmapRegionError), - /// No memory region found. - #[error("No memory region found")] - NoMemoryRegion, - /// Some of the memory regions intersect with each other. - #[error("Some of the memory regions intersect with each other")] - MemoryRegionOverlap, - /// The provided memory regions haven't been sorted. - #[error("The provided memory regions haven't been sorted")] - UnsortedMemoryRegions, -} - /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's /// memory region in the current process. /// @@ -339,17 +315,9 @@ impl GuestMemoryRegion for GuestRegionMmap { /// Represents the entire physical memory of the guest by tracking all its memory regions. /// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the /// virtual address space of the calling process. -#[derive(Clone, Debug, Default)] -pub struct GuestMemoryMmap { - regions: Vec>>, -} +pub type GuestMemoryMmap = GuestRegionCollection>; impl GuestMemoryMmap { - /// Creates an empty `GuestMemoryMmap` instance. - pub fn new() -> Self { - Self::default() - } - /// Creates a container and allocates anonymous memory for guest memory regions. /// /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. @@ -377,111 +345,6 @@ impl GuestMemoryMmap { } } -impl GuestMemoryMmap { - /// Creates a new `GuestMemoryMmap` from a vector of regions. - /// - /// # Arguments - /// - /// * `regions` - The vector of regions. - /// The regions shouldn't overlap and they should be sorted - /// by the starting address. - pub fn from_regions(mut regions: Vec>) -> result::Result { - Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) - } - - /// Creates a new `GuestMemoryMmap` from a vector of Arc regions. 
- /// - /// Similar to the constructor `from_regions()` as it returns a - /// `GuestMemoryMmap`. The need for this constructor is to provide a way for - /// consumer of this API to create a new `GuestMemoryMmap` based on existing - /// regions coming from an existing `GuestMemoryMmap` instance. - /// - /// # Arguments - /// - /// * `regions` - The vector of `Arc` regions. - /// The regions shouldn't overlap and they should be sorted - /// by the starting address. - pub fn from_arc_regions(regions: Vec>>) -> result::Result { - if regions.is_empty() { - return Err(Error::NoMemoryRegion); - } - - for window in regions.windows(2) { - let prev = &window[0]; - let next = &window[1]; - - if prev.start_addr() > next.start_addr() { - return Err(Error::UnsortedMemoryRegions); - } - - if prev.last_addr() >= next.start_addr() { - return Err(Error::MemoryRegionOverlap); - } - } - - Ok(Self { regions }) - } - - /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. - /// - /// # Arguments - /// * `region`: the memory region to insert into the guest memory object. - pub fn insert_region( - &self, - region: Arc>, - ) -> result::Result, Error> { - let mut regions = self.regions.clone(); - regions.push(region); - regions.sort_by_key(|x| x.start_addr()); - - Self::from_arc_regions(regions) - } - - /// Remove a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap` - /// on success, together with the removed region. 
- /// - /// # Arguments - /// * `base`: base address of the region to be removed - /// * `size`: size of the region to be removed - pub fn remove_region( - &self, - base: GuestAddress, - size: GuestUsize, - ) -> result::Result<(GuestMemoryMmap, Arc>), Error> { - if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { - if self.regions.get(region_index).unwrap().mapping.size() as GuestUsize == size { - let mut regions = self.regions.clone(); - let region = regions.remove(region_index); - return Ok((Self { regions }, region)); - } - } - - Err(Error::InvalidGuestRegion) - } -} - -impl GuestMemory for GuestMemoryMmap { - type R = GuestRegionMmap; - - fn num_regions(&self) -> usize { - self.regions.len() - } - - fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap> { - let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { - Ok(x) => Some(x), - // Within the closest region with starting address < addr - Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), - _ => None, - }; - index.map(|x| self.regions[x].as_ref()) - } - - fn iter(&self) -> impl Iterator { - self.regions.iter().map(AsRef::as_ref) - } -} - #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] @@ -491,16 +354,17 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::GuestAddressSpace; + use crate::{Error, GuestAddressSpace, GuestMemory}; use std::io::Write; use std::mem; + use std::sync::Arc; #[cfg(feature = "rawfd")] use std::{fs::File, path::Path}; use vmm_sys_util::tempfile::TempFile; - type GuestMemoryMmap = super::GuestMemoryMmap<()>; type GuestRegionMmap = super::GuestRegionMmap<()>; + type GuestMemoryMmap = super::GuestRegionCollection; type MmapRegion = super::MmapRegion<()>; #[test] @@ -525,9 +389,8 @@ mod tests { } assert_eq!(guest_mem.last_addr(), last_addr); } - for ((region_addr, region_size), mmap) in 
expected_regions_summary - .iter() - .zip(guest_mem.regions.iter()) + for ((region_addr, region_size), mmap) in + expected_regions_summary.iter().zip(guest_mem.iter()) { assert_eq!(region_addr, &mmap.guest_base); assert_eq!(region_size, &mmap.mapping.size()); @@ -718,7 +581,7 @@ mod tests { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)]; let guest_mem = GuestMemoryMmap::new(); - assert_eq!(guest_mem.regions.len(), 0); + assert_eq!(guest_mem.num_regions(), 0); check_guest_memory_mmap(new_guest_memory_mmap(®ions_summary), ®ions_summary); @@ -1056,8 +919,10 @@ mod tests { .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); - assert_eq!(gm.regions[0].guest_base, regions[0].0); - assert_eq!(gm.regions[1].guest_base, regions[1].0); + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].guest_base, regions[0].0); + assert_eq!(mmap_regions[1].guest_base, regions[1].0); } #[test] @@ -1085,8 +950,10 @@ mod tests { .map(|x| (x.0, x.1)) .eq(iterated_regions.iter().copied())); - assert_eq!(gm.regions[0].guest_base, regions[0].0); - assert_eq!(gm.regions[1].guest_base, regions[1].0); + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].guest_base, regions[0].0); + assert_eq!(mmap_regions[1].guest_base, regions[1].0); } #[test] @@ -1195,11 +1062,13 @@ mod tests { assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 5); - assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); - assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000)); - assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000)); - assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000)); - assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000)); + let regions = gm.iter().collect::>(); + + assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); + assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); + 
assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); + assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); } #[test] @@ -1220,7 +1089,7 @@ mod tests { assert_eq!(mem_orig.num_regions(), 2); assert_eq!(gm.num_regions(), 1); - assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); } diff --git a/src/region.rs b/src/region.rs index 35160bd8..2cb4818c 100644 --- a/src/region.rs +++ b/src/region.rs @@ -4,8 +4,10 @@ use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::Error; use crate::guest_memory::Result; use crate::{ - Address, Bytes, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress, VolatileSlice, + Address, Bytes, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, + VolatileSlice, }; +use std::sync::Arc; /// Represents a continuous region of guest physical memory. #[allow(clippy::len_without_is_empty)] @@ -139,3 +141,161 @@ pub trait GuestMemoryRegion: Bytes { None } } + +/// Errors that can occur when dealing with [`GuestRegion`]s, or collections thereof +#[derive(Debug, thiserror::Error)] +pub enum GuestRegionError { + /// Adding the guest base address to the length of the underlying mapping resulted + /// in an overflow. + #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] + #[cfg(feature = "backend-mmap")] + InvalidGuestRegion, + /// Error creating a `MmapRegion` object. + #[error("{0}")] + #[cfg(feature = "backend-mmap")] + MmapRegion(crate::mmap::MmapRegionError), + /// No memory region found. + #[error("No memory region found")] + NoMemoryRegion, + /// Some of the memory regions intersect with each other. + #[error("Some of the memory regions intersect with each other")] + MemoryRegionOverlap, + /// The provided memory regions haven't been sorted. 
+ #[error("The provided memory regions haven't been sorted")] + UnsortedMemoryRegions, +} + +/// [`GuestMemory`](trait.GuestMemory.html) implementation based on a homogeneous collection +/// of [`GuestMemoryRegion`] implementations. +/// +/// Represents a sorted set of non-overlapping physical guest memory regions. +#[derive(Debug)] +pub struct GuestRegionCollection { + regions: Vec>, +} + +impl Default for GuestRegionCollection { + fn default() -> Self { + Self { + regions: Vec::new(), + } + } +} + +impl Clone for GuestRegionCollection { + fn clone(&self) -> Self { + GuestRegionCollection { + regions: self.regions.iter().map(Arc::clone).collect(), + } + } +} + +impl GuestRegionCollection { + /// Creates an empty `GuestMemoryMmap` instance. + pub fn new() -> Self { + Self::default() + } + + /// Creates a new [`GuestRegionCollection`] from a vector of regions. + /// + /// # Arguments + /// + /// * `regions` - The vector of regions. + /// The regions shouldn't overlap, and they should be sorted + /// by the starting address. + pub fn from_regions(mut regions: Vec) -> std::result::Result { + Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) + } + + /// Creates a new [`GuestRegionCollection`] from a vector of Arc regions. + /// + /// Similar to the constructor `from_regions()` as it returns a + /// [`GuestRegionCollection`]. The need for this constructor is to provide a way for + /// consumer of this API to create a new [`GuestRegionCollection`] based on existing + /// regions coming from an existing [`GuestRegionCollection`] instance. + /// + /// # Arguments + /// + /// * `regions` - The vector of `Arc` regions. + /// The regions shouldn't overlap and they should be sorted + /// by the starting address. 
+ pub fn from_arc_regions(regions: Vec>) -> std::result::Result { + if regions.is_empty() { + return Err(GuestRegionError::NoMemoryRegion); + } + + for window in regions.windows(2) { + let prev = &window[0]; + let next = &window[1]; + + if prev.start_addr() > next.start_addr() { + return Err(GuestRegionError::UnsortedMemoryRegions); + } + + if prev.last_addr() >= next.start_addr() { + return Err(GuestRegionError::MemoryRegionOverlap); + } + } + + Ok(Self { regions }) + } + + /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. + /// + /// # Arguments + /// * `region`: the memory region to insert into the guest memory object. + pub fn insert_region( + &self, + region: Arc, + ) -> std::result::Result, GuestRegionError> { + let mut regions = self.regions.clone(); + regions.push(region); + regions.sort_by_key(|x| x.start_addr()); + + Self::from_arc_regions(regions) + } + + /// Remove a region from the [`GuestRegionCollection`] object and return a new `GuestRegionCollection` + /// on success, together with the removed region. 
+ /// + /// # Arguments + /// * `base`: base address of the region to be removed + /// * `size`: size of the region to be removed + pub fn remove_region( + &self, + base: GuestAddress, + size: GuestUsize, + ) -> std::result::Result<(GuestRegionCollection, Arc), GuestRegionError> { + if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { + if self.regions.get(region_index).unwrap().len() == size { + let mut regions = self.regions.clone(); + let region = regions.remove(region_index); + return Ok((Self { regions }, region)); + } + } + + Err(GuestRegionError::NoMemoryRegion) + } +} + +impl GuestMemory for GuestRegionCollection { + type R = R; + + fn num_regions(&self) -> usize { + self.regions.len() + } + + fn find_region(&self, addr: GuestAddress) -> Option<&R> { + let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { + Ok(x) => Some(x), + // Within the closest region with starting address < addr + Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), + _ => None, + }; + index.map(|x| self.regions[x].as_ref()) + } + + fn iter(&self) -> impl Iterator { + self.regions.iter().map(AsRef::as_ref) + } +} From e8f785929fccc7795b426d1a75d1997a07276126 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Wed, 5 Feb 2025 13:25:13 +0000 Subject: [PATCH 05/10] refactor: use `matches!` instead of to_string() for tests Some tests that were explicitly testing for error conditions used converted errors to strings to determine whether two errors are the same (by saying they're only the same if their string representation was identical). Replace this with more roboust assertions on `matches!`. 
Signed-off-by: Patrick Roy --- CHANGELOG.md | 2 + src/lib.rs | 4 +- src/mmap/mod.rs | 335 ++++++++--------------------------------- src/mmap/unix.rs | 4 +- src/mmap/xen.rs | 30 ++-- src/region.rs | 189 ++++++++++++++++++++++- src/volatile_memory.rs | 52 ------- 7 files changed, 261 insertions(+), 355 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b905592..dd0d5843 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,8 @@ `write_volatile_to` and `write_all_volatile_to` functions from the `GuestMemory` trait to the `Bytes` trait. - \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Give `GuestMemory::find_region` and `GuestMemory::num_regions` a default implementation, based on linear search. +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Provide a marker trait, `GuestMemoryRegionBytes`, which enables a default implementation of `Bytes` + for a `GuestMemoryRegion` if implemented. - \[#324](https:////github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is know, for example with `MmapRegion::bitmap()`. 
diff --git a/src/lib.rs b/src/lib.rs index d89f8459..0b2b572a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -51,7 +51,9 @@ pub use guest_memory::{ }; pub mod region; -pub use region::{GuestMemoryRegion, GuestRegionCollection, GuestRegionError as Error}; +pub use region::{ + GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionError as Error, +}; pub mod io; pub use io::{ReadVolatile, WriteVolatile}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 99139135..dcbe2573 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -15,14 +15,13 @@ use std::borrow::Borrow; use std::ops::Deref; use std::result; -use std::sync::atomic::Ordering; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; -use crate::region::GuestMemoryRegion; +use crate::region::{GuestMemoryRegion, GuestMemoryRegionBytes}; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; -use crate::{AtomicAccess, Bytes, Error, GuestRegionCollection, ReadVolatile, WriteVolatile}; +use crate::{Error, GuestRegionCollection}; // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; @@ -115,154 +114,6 @@ impl GuestRegionMmap { } } -impl Bytes for GuestRegionMmap { - type E = guest_memory::Error; - - /// # Examples - /// * Write a slice at guest address 0x1200. 
- /// - /// ``` - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let res = gm - /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) - /// .expect("Could not write to guest memory"); - /// assert_eq!(5, res); - /// ``` - fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .write(buf, maddr) - .map_err(Into::into) - } - - /// # Examples - /// * Read a slice of length 16 at guestaddress 0x1200. - /// - /// ``` - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let buf = &mut [0u8; 16]; - /// let res = gm - /// .read(buf, GuestAddress(0x1200)) - /// .expect("Could not read from guest memory"); - /// assert_eq!(16, res); - /// ``` - fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .read(buf, maddr) - .map_err(Into::into) - } - - fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .write_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice() - .unwrap() - .read_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result - where - F: ReadVolatile, - { - 
self.as_volatile_slice() - .unwrap() - .read_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn read_exact_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result<(), Self::E> - where - F: ReadVolatile, - { - self.as_volatile_slice() - .unwrap() - .read_exact_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn write_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result - where - F: WriteVolatile, - { - self.as_volatile_slice() - .unwrap() - .write_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn write_all_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result<(), Self::E> - where - F: WriteVolatile, - { - self.as_volatile_slice() - .unwrap() - .write_all_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn store( - &self, - val: T, - addr: MemoryRegionAddress, - order: Ordering, - ) -> guest_memory::Result<()> { - self.as_volatile_slice().and_then(|s| { - s.store(val, addr.raw_value() as usize, order) - .map_err(Into::into) - }) - } - - fn load( - &self, - addr: MemoryRegionAddress, - order: Ordering, - ) -> guest_memory::Result { - self.as_volatile_slice() - .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) - } -} - impl GuestMemoryRegion for GuestRegionMmap { type B = B; @@ -309,6 +160,8 @@ impl GuestMemoryRegion for GuestRegionMmap { } } +impl GuestMemoryRegionBytes for GuestRegionMmap {} + /// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory /// in the current process. 
/// @@ -354,7 +207,7 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::{Error, GuestAddressSpace, GuestMemory}; + use crate::{Bytes, Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; use std::io::Write; use std::mem; @@ -451,129 +304,66 @@ mod tests { fn test_no_memory_region() { let regions_summary = []; - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::NoMemoryRegion) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::NoMemoryRegion + )); } #[test] fn test_overlapping_memory_regions() { let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)]; - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - 
.unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::MemoryRegionOverlap) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::MemoryRegionOverlap + )); } #[test] fn test_unsorted_memory_regions() { let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)]; - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap(®ions_summary).err().unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_with_files(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); - - assert_eq!( - format!( - "{:?}", - new_guest_memory_mmap_from_arc_regions(®ions_summary) - .err() - .unwrap() - ), - format!("{:?}", Error::UnsortedMemoryRegions) - ); + assert!(matches!( + new_guest_memory_mmap(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), + Error::UnsortedMemoryRegions + )); } #[test] @@ -798,18 +588,13 @@ 
mod tests { for gm in gm_list.iter() { let val1: u64 = 0xaa55_aa55_aa55_aa55; let val2: u64 = 0x55aa_55aa_55aa_55aa; - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()), - format!("InvalidGuestAddress({:?})", bad_addr,) - ); - assert_eq!( - format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()), - format!( - "PartialBuffer {{ expected: {:?}, completed: {:?} }}", - mem::size_of::(), - max_addr.checked_offset_from(bad_addr2).unwrap() - ) - ); + assert!(matches!( + gm.write_obj(val1, bad_addr).unwrap_err(), + GuestMemoryError::InvalidGuestAddress(addr) if addr == bad_addr + )); + assert!(matches!( + gm.write_obj(val1, bad_addr2).unwrap_err(), + GuestMemoryError::PartialBuffer { expected, completed} if expected == size_of::() && completed == max_addr.checked_offset_from(bad_addr2).unwrap() as usize)); gm.write_obj(val1, GuestAddress(0x500)).unwrap(); gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs index b5ee5b04..d130f62b 100644 --- a/src/mmap/unix.rs +++ b/src/mmap/unix.rs @@ -561,7 +561,7 @@ mod tests { prot, flags | libc::MAP_FIXED, ); - assert_eq!(format!("{:?}", r.unwrap_err()), "MapFixed"); + assert!(matches!(r.unwrap_err(), Error::MapFixed)); // Let's resize the file. 
assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); @@ -606,7 +606,7 @@ mod tests { let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) }; - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidPointer"); + assert!(matches!(r.unwrap_err(), Error::InvalidPointer)); let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() }; diff --git a/src/mmap/xen.rs b/src/mmap/xen.rs index 0d96dd47..f794aa11 100644 --- a/src/mmap/xen.rs +++ b/src/mmap/xen.rs @@ -1069,26 +1069,18 @@ mod tests { range.mmap_flags = 16; let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!("MmapFlags({})", range.mmap_flags), - ); + assert!(matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags)); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits(); let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!("MmapFlags({:x})", MmapXenFlags::ALL.bits()), + assert!( + matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits()) ); range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); let r = MmapXen::new(&range); - assert_eq!( - format!("{:?}", r.unwrap_err()), - format!( - "MmapFlags({:x})", - MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits(), - ), + assert!( + matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits()) ); } @@ -1124,17 +1116,17 @@ mod tests { range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)); range.prot = None; let r = MmapXenForeign::new(&range); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenForeign::new(&range); - 
assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenForeign::new(&range); - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); let mut range = MmapRange::initialized(true); range.size = 0; @@ -1156,7 +1148,7 @@ mod tests { let mut range = MmapRange::initialized(true); range.prot = None; let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.prot = None; @@ -1166,12 +1158,12 @@ mod tests { let mut range = MmapRange::initialized(true); range.flags = None; let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_eq!(format!("{:?}", r.unwrap_err()), "UnexpectedError"); + assert!(matches!(r.unwrap_err(), Error::UnexpectedError)); let mut range = MmapRange::initialized(true); range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_eq!(format!("{:?}", r.unwrap_err()), "InvalidOffsetLength"); + assert!(matches!(r.unwrap_err(), Error::InvalidOffsetLength)); let mut range = MmapRange::initialized(true); range.size = 0; diff --git a/src/region.rs b/src/region.rs index 2cb4818c..27aea1c8 100644 --- a/src/region.rs +++ b/src/region.rs @@ -1,17 +1,44 @@ //! 
Module containing abstracts for dealing with contiguous regions of guest memory use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::Error; use crate::guest_memory::Result; use crate::{ - Address, Bytes, FileOffset, GuestAddress, GuestMemory, GuestUsize, MemoryRegionAddress, - VolatileSlice, + Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, + GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile, }; +use std::sync::atomic::Ordering; use std::sync::Arc; /// Represents a continuous region of guest physical memory. +/// +/// Note that the [`Bytes`] super trait requirement can be satisfied by implementing +/// [`GuestMemoryRegionBytes`], which provides a default implementation of `Bytes` +/// for memory regions that are backed by physical RAM: +/// +/// ``` +/// /// +/// use vm_memory::bitmap::BS; +/// use vm_memory::{GuestAddress, GuestMemoryRegion, GuestMemoryRegionBytes, GuestUsize}; +/// +/// struct MyRegion; +/// +/// impl GuestMemoryRegion for MyRegion { +/// type B = (); +/// fn len(&self) -> GuestUsize { +/// todo!() +/// } +/// fn start_addr(&self) -> GuestAddress { +/// todo!() +/// } +/// fn bitmap(&self) { +/// todo!() +/// } +/// } +/// +/// impl GuestMemoryRegionBytes for MyRegion {} +/// ``` #[allow(clippy::len_without_is_empty)] -pub trait GuestMemoryRegion: Bytes { +pub trait GuestMemoryRegion: Bytes { /// Type used for dirty memory tracking. type B: Bitmap; @@ -73,7 +100,7 @@ pub trait GuestMemoryRegion: Bytes { /// Rust memory safety model. It's the caller's responsibility to ensure that there's no /// concurrent accesses to the underlying guest memory. fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { - Err(Error::HostAddressNotAvailable) + Err(GuestMemoryError::HostAddressNotAvailable) } /// Returns information regarding the file and offset backing this memory region. 
@@ -89,7 +116,7 @@ pub trait GuestMemoryRegion: Bytes { offset: MemoryRegionAddress, count: usize, ) -> Result>> { - Err(Error::HostAddressNotAvailable) + Err(GuestMemoryError::HostAddressNotAvailable) } /// Gets a slice of memory for the entire region that supports volatile access. @@ -299,3 +326,153 @@ impl GuestMemory for GuestRegionCollection { self.regions.iter().map(AsRef::as_ref) } } + +/// A marker trait that if implemented on a type `R` makes available a default +/// implementation of `Bytes` for `R`, based on the assumption +/// that the entire `GuestMemoryRegion` is just traditional memory without any +/// special access requirements. +pub trait GuestMemoryRegionBytes: GuestMemoryRegion {} + +impl Bytes for R { + type E = GuestMemoryError; + + /// # Examples + /// * Write a slice at guest address 0x1200. + /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; + /// # + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # let start_addr = GuestAddress(0x1000); + /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) + /// # .expect("Could not create guest memory"); + /// # + /// let res = gm + /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) + /// .expect("Could not write to guest memory"); + /// assert_eq!(5, res); + /// # } + /// ``` + fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .write(buf, maddr) + .map_err(Into::into) + } + + /// # Examples + /// * Read a slice of length 16 at guestaddress 0x1200. 
+ /// + /// ``` + /// # #[cfg(feature = "backend-mmap")] + /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; + /// # + /// # #[cfg(feature = "backend-mmap")] + /// # { + /// # let start_addr = GuestAddress(0x1000); + /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) + /// # .expect("Could not create guest memory"); + /// # + /// let buf = &mut [0u8; 16]; + /// let res = gm + /// .read(buf, GuestAddress(0x1200)) + /// .expect("Could not read from guest memory"); + /// assert_eq!(16, res); + /// # } + /// ``` + fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .read(buf, maddr) + .map_err(Into::into) + } + + fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .write_slice(buf, maddr) + .map_err(Into::into) + } + + fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> { + let maddr = addr.raw_value() as usize; + self.as_volatile_slice()? + .read_slice(buf, maddr) + .map_err(Into::into) + } + + fn read_volatile_from( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> Result + where + F: ReadVolatile, + { + self.as_volatile_slice()? + .read_volatile_from(addr.0 as usize, src, count) + .map_err(Into::into) + } + + fn read_exact_volatile_from( + &self, + addr: MemoryRegionAddress, + src: &mut F, + count: usize, + ) -> Result<()> + where + F: ReadVolatile, + { + self.as_volatile_slice()? + .read_exact_volatile_from(addr.0 as usize, src, count) + .map_err(Into::into) + } + + fn write_volatile_to( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> Result + where + F: WriteVolatile, + { + self.as_volatile_slice()? 
+ .write_volatile_to(addr.0 as usize, dst, count) + .map_err(Into::into) + } + + fn write_all_volatile_to( + &self, + addr: MemoryRegionAddress, + dst: &mut F, + count: usize, + ) -> Result<()> + where + F: WriteVolatile, + { + self.as_volatile_slice()? + .write_all_volatile_to(addr.0 as usize, dst, count) + .map_err(Into::into) + } + + fn store( + &self, + val: T, + addr: MemoryRegionAddress, + order: Ordering, + ) -> Result<()> { + self.as_volatile_slice().and_then(|s| { + s.store(val, addr.raw_value() as usize, order) + .map_err(Into::into) + }) + } + + fn load(&self, addr: MemoryRegionAddress, order: Ordering) -> Result { + self.as_volatile_slice() + .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) + } +} diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index 43c1d206..acff239b 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -1488,58 +1488,6 @@ mod tests { slice.compute_end_offset(6, 0).unwrap_err(); } - #[test] - fn test_display_error() { - assert_eq!( - format!("{}", Error::OutOfBounds { addr: 0x10 }), - "address 0x10 is out of bounds" - ); - - assert_eq!( - format!( - "{}", - Error::Overflow { - base: 0x0, - offset: 0x10 - } - ), - "address 0x0 offset by 0x10 would overflow" - ); - - assert_eq!( - format!( - "{}", - Error::TooBig { - nelements: 100_000, - size: 1_000_000_000 - } - ), - "100000 elements of size 1000000000 would overflow a usize" - ); - - assert_eq!( - format!( - "{}", - Error::Misaligned { - addr: 0x4, - alignment: 8 - } - ), - "address 0x4 is not aligned to 8" - ); - - assert_eq!( - format!( - "{}", - Error::PartialBuffer { - expected: 100, - completed: 90 - } - ), - "only used 90 bytes in 100 long buffer" - ); - } - #[test] fn misaligned_ref() { let mut a = [0u8; 3]; From 938b9a97a88d37735b73936d95c56760f71d1cb2 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 14:18:10 +0000 Subject: [PATCH 06/10] test: move GuestRegionCollection specific tests to region.rs Move 
some tests that are all about the invariants of GuestRegionCollection constructors to region.rs, where they can be run without the need for the backend-mmap feature (by instead using a mock memory region). While we're at it, fix these tests, which called from_regions twice but from_arc_regions never. Remove some test cases that are superfluous: since the `regions` field of `GuestRegionCollection` is private, all construction needs to go through `from_regions`/`from_arc_regions`, and testing that wrappers around these functions uphold the invariants of the wrapped functions is not very useful. test_memory and create_vec_with_regions were the same test, so deduplicate while moving to region.rs. Generally, most of these tests could be moved to region.rs, given sufficient mocking of the memory region. I've somewhat arbitrarily drawn the line at "only transfer tests where the mock only needs GuestAddress and length", which roughly translates to "move tests where we are testing the default implementations of the GuestMemory and GuestMemoryRegion traits, which are not overwritten in the mmap-based implementations". Of course, we could write a mock that implements actual allocation of memory via std::alloc::alloc, but at that point we'd be testing the mock more than actual vm-memory code (and start losing coverage of the mmap implementations).
Signed-off-by: Patrick Roy --- src/mmap/mod.rs | 402 +----------------------------------------------- src/region.rs | 325 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 326 insertions(+), 401 deletions(-) diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index dcbe2573..32e90188 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -207,11 +207,10 @@ mod tests { use crate::bitmap::tests::test_guest_memory_and_region; use crate::bitmap::AtomicBitmap; - use crate::{Bytes, Error, GuestAddressSpace, GuestMemory, GuestMemoryError}; + use crate::{Bytes, GuestMemory, GuestMemoryError}; use std::io::Write; use std::mem; - use std::sync::Arc; #[cfg(feature = "rawfd")] use std::{fs::File, path::Path}; use vmm_sys_util::tempfile::TempFile; @@ -226,171 +225,6 @@ mod tests { assert_eq!(1024, m.size()); } - fn check_guest_memory_mmap( - maybe_guest_mem: Result, - expected_regions_summary: &[(GuestAddress, usize)], - ) { - assert!(maybe_guest_mem.is_ok()); - - let guest_mem = maybe_guest_mem.unwrap(); - assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); - let maybe_last_mem_reg = expected_regions_summary.last(); - if let Some((region_addr, region_size)) = maybe_last_mem_reg { - let mut last_addr = region_addr.unchecked_add(*region_size as u64); - if last_addr.raw_value() != 0 { - last_addr = last_addr.unchecked_sub(1); - } - assert_eq!(guest_mem.last_addr(), last_addr); - } - for ((region_addr, region_size), mmap) in - expected_regions_summary.iter().zip(guest_mem.iter()) - { - assert_eq!(region_addr, &mmap.guest_base); - assert_eq!(region_size, &mmap.mapping.size()); - - assert!(guest_mem.find_region(*region_addr).is_some()); - } - } - - fn new_guest_memory_mmap( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_ranges(regions_summary) - } - - fn new_guest_memory_mmap_from_regions( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_regions( - regions_summary - .iter() - 
.map(|(region_addr, region_size)| { - GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap() - }) - .collect(), - ) - } - - fn new_guest_memory_mmap_from_arc_regions( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - GuestMemoryMmap::from_arc_regions( - regions_summary - .iter() - .map(|(region_addr, region_size)| { - Arc::new(GuestRegionMmap::from_range(*region_addr, *region_size, None).unwrap()) - }) - .collect(), - ) - } - - fn new_guest_memory_mmap_with_files( - regions_summary: &[(GuestAddress, usize)], - ) -> Result { - let regions: Vec<(GuestAddress, usize, Option)> = regions_summary - .iter() - .map(|(region_addr, region_size)| { - let f = TempFile::new().unwrap().into_file(); - f.set_len(*region_size as u64).unwrap(); - - (*region_addr, *region_size, Some(FileOffset::new(f, 0))) - }) - .collect(); - - GuestMemoryMmap::from_ranges_with_files(®ions) - } - - #[test] - fn test_no_memory_region() { - let regions_summary = []; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::NoMemoryRegion - )); - } - - #[test] - fn test_overlapping_memory_regions() { - let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(99), 100_usize)]; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - assert!(matches!( - 
new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::MemoryRegionOverlap - )); - } - - #[test] - fn test_unsorted_memory_regions() { - let regions_summary = [(GuestAddress(100), 100_usize), (GuestAddress(0), 100_usize)]; - - assert!(matches!( - new_guest_memory_mmap(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_with_files(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - assert!(matches!( - new_guest_memory_mmap_from_regions(®ions_summary).unwrap_err(), - Error::UnsortedMemoryRegions - )); - } - - #[test] - fn test_valid_memory_regions() { - let regions_summary = [(GuestAddress(0), 100_usize), (GuestAddress(100), 100_usize)]; - - let guest_mem = GuestMemoryMmap::new(); - assert_eq!(guest_mem.num_regions(), 0); - - check_guest_memory_mmap(new_guest_memory_mmap(®ions_summary), ®ions_summary); - - check_guest_memory_mmap( - new_guest_memory_mmap_with_files(®ions_summary), - ®ions_summary, - ); - - check_guest_memory_mmap( - new_guest_memory_mmap_from_regions(®ions_summary), - ®ions_summary, - ); - - check_guest_memory_mmap( - new_guest_memory_mmap_from_arc_regions(®ions_summary), - ®ions_summary, - ); - } - #[test] fn slice_addr() { let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap(); @@ -416,64 +250,6 @@ mod tests { assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); } - #[test] - fn test_address_in_range() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - 
(start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - assert!(guest_mem.address_in_range(GuestAddress(0x200))); - assert!(!guest_mem.address_in_range(GuestAddress(0x600))); - assert!(guest_mem.address_in_range(GuestAddress(0xa00))); - assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); - } - } - - #[test] - fn test_check_address() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - assert_eq!( - guest_mem.check_address(GuestAddress(0x200)), - Some(GuestAddress(0x200)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); - assert_eq!( - guest_mem.check_address(GuestAddress(0xa00)), - Some(GuestAddress(0xa00)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); - } - } - #[test] fn test_to_region_addr() { let f1 = TempFile::new().unwrap().into_file(); @@ -680,67 +456,6 @@ mod tests { } } - #[test] - fn create_vec_with_regions() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gm = GuestMemoryMmap::from_ranges(®ions).unwrap(); - - for region in gm.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for 
region in gm.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); - } - assert_eq!(regions, iterated_regions); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mmap_regions = gm.iter().collect::>(); - - assert_eq!(mmap_regions[0].guest_base, regions[0].0); - assert_eq!(mmap_regions[1].guest_base, regions[1].0); - } - - #[test] - fn test_memory() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem = gm.memory(); - - for region in mem.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for region in mem.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); - } - assert_eq!(regions, iterated_regions); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mmap_regions = gm.iter().collect::>(); - - assert_eq!(mmap_regions[0].guest_base, regions[0].0); - assert_eq!(mmap_regions[1].guest_base, regions[1].0); - } - #[test] fn test_access_cross_boundary() { let f1 = TempFile::new().unwrap().into_file(); @@ -820,64 +535,6 @@ mod tests { assert_eq!(region.file_offset().unwrap().start(), offset); } - #[test] - fn test_mmap_insert_region() { - let region_size = 0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem_orig = gm.memory(); - assert_eq!(mem_orig.num_regions(), 2); - - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0x8000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - 
Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); - let gm = gm.insert_region(mmap).unwrap(); - let mmap = - Arc::new(GuestRegionMmap::from_range(GuestAddress(0xc000), 0x1000, None).unwrap()); - gm.insert_region(mmap).unwrap_err(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 5); - - let regions = gm.iter().collect::>(); - - assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); - assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); - assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); - assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); - assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); - } - - #[test] - fn test_mmap_remove_region() { - let region_size = 0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let gm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); - let mem_orig = gm.memory(); - assert_eq!(mem_orig.num_regions(), 2); - - gm.remove_region(GuestAddress(0), 128).unwrap_err(); - gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); - let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 1); - - assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); - assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); - } - #[test] fn test_guest_memory_mmap_get_slice() { let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap(); @@ -948,63 +605,6 @@ mod tests { assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); } - #[test] - fn test_checked_offset() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert_eq!( - 
guest_mem.checked_offset(start_addr1, 0x200), - Some(GuestAddress(0x200)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0xa00), - Some(GuestAddress(0xa00)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr2, 0x7ff), - Some(GuestAddress(0xfff)) - ); - assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); - assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None); - - assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0x400 - 1), - Some(GuestAddress(0x400 - 1)) - ); - } - - #[test] - fn test_check_range() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert!(guest_mem.check_range(start_addr1, 0x0)); - assert!(guest_mem.check_range(start_addr1, 0x200)); - assert!(guest_mem.check_range(start_addr1, 0x400)); - assert!(!guest_mem.check_range(start_addr1, 0xa00)); - assert!(guest_mem.check_range(start_addr2, 0x7ff)); - assert!(guest_mem.check_range(start_addr2, 0x800)); - assert!(!guest_mem.check_range(start_addr2, 0x801)); - assert!(!guest_mem.check_range(start_addr2, 0xc00)); - assert!(!guest_mem.check_range(start_addr1, usize::MAX)); - } - #[test] fn test_atomic_accesses() { let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap(); diff --git a/src/region.rs b/src/region.rs index 27aea1c8..62dbf3f5 100644 --- a/src/region.rs +++ b/src/region.rs @@ -476,3 +476,328 @@ impl Bytes for R { .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) } } + +#[cfg(test)] +mod tests { + use crate::region::{GuestMemoryRegionBytes, GuestRegionError}; + use crate::{ + Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize, + }; + use std::sync::Arc; + + #[derive(Debug, PartialEq, Eq)] + 
struct MockRegion { + start: GuestAddress, + len: GuestUsize, + } + + impl GuestMemoryRegion for MockRegion { + type B = (); + + fn len(&self) -> GuestUsize { + self.len + } + + fn start_addr(&self) -> GuestAddress { + self.start + } + + fn bitmap(&self) {} + } + + impl GuestMemoryRegionBytes for MockRegion {} + + type Collection = GuestRegionCollection; + + fn check_guest_memory_mmap( + maybe_guest_mem: Result, + expected_regions_summary: &[(GuestAddress, u64)], + ) { + assert!(maybe_guest_mem.is_ok()); + + let guest_mem = maybe_guest_mem.unwrap(); + assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); + let maybe_last_mem_reg = expected_regions_summary.last(); + if let Some((region_addr, region_size)) = maybe_last_mem_reg { + let mut last_addr = region_addr.unchecked_add(*region_size); + if last_addr.raw_value() != 0 { + last_addr = last_addr.unchecked_sub(1); + } + assert_eq!(guest_mem.last_addr(), last_addr); + } + for ((region_addr, region_size), mmap) in + expected_regions_summary.iter().zip(guest_mem.iter()) + { + assert_eq!(region_addr, &mmap.start); + assert_eq!(region_size, &mmap.len); + + assert!(guest_mem.find_region(*region_addr).is_some()); + } + } + + fn new_guest_memory_collection_from_regions( + regions_summary: &[(GuestAddress, u64)], + ) -> Result { + Collection::from_regions( + regions_summary + .iter() + .map(|&(start, len)| MockRegion { start, len }) + .collect(), + ) + } + + fn new_guest_memory_collection_from_arc_regions( + regions_summary: &[(GuestAddress, u64)], + ) -> Result { + Collection::from_arc_regions( + regions_summary + .iter() + .map(|&(start, len)| Arc::new(MockRegion { start, len })) + .collect(), + ) + } + + #[test] + fn test_no_memory_region() { + let regions_summary = []; + + assert!(matches!( + new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), + GuestRegionError::NoMemoryRegion + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), + 
GuestRegionError::NoMemoryRegion + )); + } + + #[test] + fn test_overlapping_memory_regions() { + let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)]; + + assert!(matches!( + new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), + GuestRegionError::MemoryRegionOverlap + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), + GuestRegionError::MemoryRegionOverlap + )); + } + + #[test] + fn test_unsorted_memory_regions() { + let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)]; + + assert!(matches!( + new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), + GuestRegionError::UnsortedMemoryRegions + )); + assert!(matches!( + new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), + GuestRegionError::UnsortedMemoryRegions + )); + } + + #[test] + fn test_valid_memory_regions() { + let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)]; + + let guest_mem = Collection::new(); + assert_eq!(guest_mem.num_regions(), 0); + + check_guest_memory_mmap( + new_guest_memory_collection_from_regions(®ions_summary), + ®ions_summary, + ); + + check_guest_memory_mmap( + new_guest_memory_collection_from_arc_regions(®ions_summary), + ®ions_summary, + ); + } + + #[test] + fn test_mmap_insert_region() { + let region_size = 0x1000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); + let mut gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + let new_regions = [ + (GuestAddress(0x8000), 0x1000), + (GuestAddress(0x4000), 0x1000), + (GuestAddress(0xc000), 0x1000), + ]; + + for (start, len) in new_regions { + gm = gm + .insert_region(Arc::new(MockRegion { start, len })) + .unwrap(); + } + + gm.insert_region(Arc::new(MockRegion { + start: GuestAddress(0xc000), + len: 0x1000, + })) + .unwrap_err(); + + 
assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 5); + + let regions = gm.iter().collect::>(); + + assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); + assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); + assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); + assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); + assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_mmap_remove_region() { + let region_size = 0x1000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10_0000), region_size), + ]; + let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); + let gm = mem_orig.clone(); + assert_eq!(mem_orig.num_regions(), 2); + + gm.remove_region(GuestAddress(0), 128).unwrap_err(); + gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); + let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); + + assert_eq!(mem_orig.num_regions(), 2); + assert_eq!(gm.num_regions(), 1); + + assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); + assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); + } + + #[test] + fn test_iter() { + let region_size = 0x400; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x1000), region_size), + ]; + let mut iterated_regions = Vec::new(); + let gm = new_guest_memory_collection_from_regions(®ions).unwrap(); + + for region in gm.iter() { + assert_eq!(region.len(), region_size as GuestUsize); + } + + for region in gm.iter() { + iterated_regions.push((region.start_addr(), region.len())); + } + assert_eq!(regions, iterated_regions); + + assert!(regions + .iter() + .map(|x| (x.0, x.1)) + .eq(iterated_regions.iter().copied())); + + let mmap_regions = gm.iter().collect::>(); + + assert_eq!(mmap_regions[0].start, regions[0].0); + assert_eq!(mmap_regions[1].start, regions[1].0); + } + + #[test] + fn test_address_in_range() { + let start_addr1 = GuestAddress(0x0); 
+ let start_addr2 = GuestAddress(0x800); + let guest_mem = + new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) + .unwrap(); + + assert!(guest_mem.address_in_range(GuestAddress(0x200))); + assert!(!guest_mem.address_in_range(GuestAddress(0x600))); + assert!(guest_mem.address_in_range(GuestAddress(0xa00))); + assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); + } + + #[test] + fn test_check_address() { + let start_addr1 = GuestAddress(0x0); + let start_addr2 = GuestAddress(0x800); + let guest_mem = + new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) + .unwrap(); + + assert_eq!( + guest_mem.check_address(GuestAddress(0x200)), + Some(GuestAddress(0x200)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); + assert_eq!( + guest_mem.check_address(GuestAddress(0xa00)), + Some(GuestAddress(0xa00)) + ); + assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); + } + + #[test] + fn test_checked_offset() { + let start_addr1 = GuestAddress(0); + let start_addr2 = GuestAddress(0x800); + let start_addr3 = GuestAddress(0xc00); + let guest_mem = new_guest_memory_collection_from_regions(&[ + (start_addr1, 0x400), + (start_addr2, 0x400), + (start_addr3, 0x400), + ]) + .unwrap(); + + assert_eq!( + guest_mem.checked_offset(start_addr1, 0x200), + Some(GuestAddress(0x200)) + ); + assert_eq!( + guest_mem.checked_offset(start_addr1, 0xa00), + Some(GuestAddress(0xa00)) + ); + assert_eq!( + guest_mem.checked_offset(start_addr2, 0x7ff), + Some(GuestAddress(0xfff)) + ); + assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); + assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None); + + assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); + assert_eq!( + guest_mem.checked_offset(start_addr1, 0x400 - 1), + Some(GuestAddress(0x400 - 1)) + ); + } + + #[test] + fn test_check_range() { + let start_addr1 = GuestAddress(0); + let start_addr2 = 
GuestAddress(0x800); + let start_addr3 = GuestAddress(0xc00); + let guest_mem = new_guest_memory_collection_from_regions(&[ + (start_addr1, 0x400), + (start_addr2, 0x400), + (start_addr3, 0x400), + ]) + .unwrap(); + + assert!(guest_mem.check_range(start_addr1, 0x0)); + assert!(guest_mem.check_range(start_addr1, 0x200)); + assert!(guest_mem.check_range(start_addr1, 0x400)); + assert!(!guest_mem.check_range(start_addr1, 0xa00)); + assert!(guest_mem.check_range(start_addr2, 0x7ff)); + assert!(guest_mem.check_range(start_addr2, 0x800)); + assert!(!guest_mem.check_range(start_addr2, 0x801)); + assert!(!guest_mem.check_range(start_addr2, 0xc00)); + assert!(!guest_mem.check_range(start_addr1, usize::MAX)); + } +} From d22a324c8c57203c9d77bf8077a8efca612ca264 Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Fri, 14 Mar 2025 16:55:27 +0000 Subject: [PATCH 07/10] refactor(test): Use mock regions in atomic.rs tests These tests only test properties of the GuestMemoryAtomic implementation that are independent of the actual M: GuestMemory being used. So simplify it to drop the dependency on backend-mmap. 
Signed-off-by: Patrick Roy --- src/atomic.rs | 64 +++++++++++++++++++++++++++------------------------ src/region.rs | 12 +++++----- 2 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/atomic.rs b/src/atomic.rs index 4b20b2c4..22697d05 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -140,14 +140,12 @@ impl GuestMemoryExclusiveGuard<'_, M> { } #[cfg(test)] -#[cfg(feature = "backend-mmap")] mod tests { use super::*; - use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion}; + use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion}; + use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize}; - type GuestMemoryMmap = crate::GuestMemoryMmap<()>; - type GuestRegionMmap = crate::GuestRegionMmap<()>; - type GuestMemoryMmapAtomic = GuestMemoryAtomic; + type GuestMemoryMmapAtomic = GuestMemoryAtomic; #[test] fn test_atomic_memory() { @@ -157,7 +155,7 @@ mod tests { (GuestAddress(0x1000), region_size), ]; let mut iterated_regions = Vec::new(); - let gmm = GuestMemoryMmap::from_ranges(®ions).unwrap(); + let gmm = new_guest_memory_collection_from_regions(®ions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = gm.memory(); @@ -166,7 +164,7 @@ mod tests { } for region in mem.iter() { - iterated_regions.push((region.start_addr(), region.len() as usize)); + iterated_regions.push((region.start_addr(), region.len())); } assert_eq!(regions, iterated_regions); assert_eq!(mem.num_regions(), 2); @@ -207,7 +205,7 @@ mod tests { (GuestAddress(0x0), region_size), (GuestAddress(0x1000), region_size), ]; - let gmm = GuestMemoryMmap::from_ranges(®ions).unwrap(); + let gmm = new_guest_memory_collection_from_regions(®ions).unwrap(); let gm = GuestMemoryMmapAtomic::new(gmm); let mem = { let guard1 = gm.memory(); @@ -219,11 +217,11 @@ mod tests { #[test] fn test_atomic_hotplug() { let region_size = 0x1000; - let regions = vec![ + let regions = [ (GuestAddress(0x0), region_size), 
(GuestAddress(0x10_0000), region_size), ]; - let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(®ions).unwrap()); + let mut gmm = Arc::new(new_guest_memory_collection_from_regions(®ions).unwrap()); let gm: GuestMemoryAtomic<_> = gmm.clone().into(); let mem_orig = gm.memory(); assert_eq!(mem_orig.num_regions(), 2); @@ -231,26 +229,32 @@ mod tests { { let guard = gm.lock().unwrap(); let new_gmm = Arc::make_mut(&mut gmm); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) - .unwrap(), - ); - let new_gmm = new_gmm.insert_region(mmap).unwrap(); - let mmap = Arc::new( - GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000)) - .unwrap(), - ); - new_gmm.insert_region(mmap).unwrap_err(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x8000), + len: 0x1000, + })) + .unwrap(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x4000), + len: 0x1000, + })) + .unwrap(); + let new_gmm = new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0xc000), + len: 0x1000, + })) + .unwrap(); + + new_gmm + .insert_region(Arc::new(MockRegion { + start: GuestAddress(0x8000), + len: 0x1000, + })) + .unwrap_err(); + guard.replace(new_gmm); } diff --git a/src/region.rs b/src/region.rs index 62dbf3f5..8e416071 100644 --- a/src/region.rs +++ b/src/region.rs @@ -478,7 +478,7 @@ impl Bytes for R { } #[cfg(test)] -mod tests { +pub(crate) mod tests { use crate::region::{GuestMemoryRegionBytes, GuestRegionError}; use crate::{ Address, GuestAddress, GuestMemory, GuestMemoryRegion, 
GuestRegionCollection, GuestUsize, @@ -486,9 +486,9 @@ mod tests { use std::sync::Arc; #[derive(Debug, PartialEq, Eq)] - struct MockRegion { - start: GuestAddress, - len: GuestUsize, + pub(crate) struct MockRegion { + pub(crate) start: GuestAddress, + pub(crate) len: GuestUsize, } impl GuestMemoryRegion for MockRegion { @@ -507,7 +507,7 @@ mod tests { impl GuestMemoryRegionBytes for MockRegion {} - type Collection = GuestRegionCollection; + pub(crate) type Collection = GuestRegionCollection; fn check_guest_memory_mmap( maybe_guest_mem: Result, @@ -535,7 +535,7 @@ mod tests { } } - fn new_guest_memory_collection_from_regions( + pub(crate) fn new_guest_memory_collection_from_regions( regions_summary: &[(GuestAddress, u64)], ) -> Result { Collection::from_regions( From dd023ad6d2860e8e58bbf1613cdfc58a4e4af7be Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Tue, 1 Jul 2025 12:37:05 +0100 Subject: [PATCH 08/10] refactor: Have GuestRegionMmap::new return Option There is only a single error condition, so we can indicate the error case with an Option. First step towards untangling the region specific errors from the region collection specific errors. Signed-off-by: Patrick Roy --- CHANGELOG.md | 4 +++- src/mmap/mod.rs | 23 ++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd0d5843..53b14972 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,9 @@ a default implementation, based on linear search. - \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Provide a marker trait, `GuestMemoryRegionBytes`, which enables a default implementation of `Bytes` for a `GuestMemoryRegion` if implemented. - +- \[[#312](https://github.com/rust-vmm/vm-memory/pull/312)\]: Adjust error types returned from `GuestMemoryMmap::from_ranges[_with_files]` + and `GuestRegionMmap::from_range` to be separate from the error type returned by `GuestRegionCollection` functions. 
+ Change return type of `GuestRegionMmap::new` from `Result` to `Option`. - \[#324](https:////github.com/rust-vmm/vm-memory/pull/324)\] `GuestMemoryRegion::bitmap()` now returns a `BitmapSlice`. Accessing the full bitmap is now possible only if the type of the memory region is know, for example with `MmapRegion::bitmap()`. ### Removed diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 32e90188..8e4626f7 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -67,15 +67,16 @@ impl Deref for GuestRegionMmap { impl GuestRegionMmap { /// Create a new memory-mapped memory region for the guest's physical memory. - pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> result::Result { - if guest_base.0.checked_add(mapping.size() as u64).is_none() { - return Err(Error::InvalidGuestRegion); - } - - Ok(GuestRegionMmap { - mapping, - guest_base, - }) + /// + /// Returns `None` if `guest_base` + `mapping.size()` would overflow. + pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Option { + guest_base + .0 + .checked_add(mapping.size() as u64) + .map(|_| Self { + mapping, + guest_base, + }) } } @@ -94,7 +95,7 @@ impl GuestRegionMmap { } .map_err(Error::MmapRegion)?; - Self::new(region, addr) + Self::new(region, addr).ok_or(Error::InvalidGuestRegion) } } @@ -110,7 +111,7 @@ impl GuestRegionMmap { let range = MmapRange::new_unix(size, file, addr); let region = MmapRegion::from_range(range).map_err(Error::MmapRegion)?; - Self::new(region, addr) + Self::new(region, addr).ok_or(Error::InvalidGuestRegion) } } From 33ae0b3062745e2b87b4477bbc8df6887f84240c Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Tue, 1 Jul 2025 12:47:00 +0100 Subject: [PATCH 09/10] refactor: clean up region collection related errors Turn the error type defined in region.rs into one specific to GuestRegionCollections, and move the non-collection related errors into a new error type that is specific to the `from_ranges*` family of functions (these are the only functions that can return both
a region construction error, as well as a collection error). The `from_ranges*` family of functions is publicly exported, but in reality only used in tests, so breakage of this should be limited. Since we're breaking things anyway, remove the reexport of GuestRegionCollectionError as `vm_memory::Error`. It made no sense for this one to have this special designation. Signed-off-by: Patrick Roy --- src/lib.rs | 2 +- src/mmap/mod.rs | 43 ++++++++++++++++++++++++++------------- src/region.rs | 53 ++++++++++++++++++++++--------------------------- 3 files changed, 54 insertions(+), 44 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0b2b572a..2f87f4c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -52,7 +52,7 @@ pub use guest_memory::{ pub mod region; pub use region::{ - GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionError as Error, + GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionCollectionError, }; pub mod io; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs index 8e4626f7..f514b4e3 100644 --- a/src/mmap/mod.rs +++ b/src/mmap/mod.rs @@ -19,9 +19,10 @@ use std::result; use crate::address::Address; use crate::bitmap::{Bitmap, BS}; use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; -use crate::region::{GuestMemoryRegion, GuestMemoryRegionBytes}; +use crate::region::{ + GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionCollectionError, +}; use crate::volatile_memory::{VolatileMemory, VolatileSlice}; -use crate::{Error, GuestRegionCollection}; // re-export for backward compat, as the trait used to be defined in mmap.rs pub use crate::bitmap::NewBitmap; @@ -87,15 +88,14 @@ impl GuestRegionMmap { addr: GuestAddress, size: usize, file: Option, - ) -> result::Result { + ) -> result::Result { let region = if let Some(ref f_off) = file { - MmapRegion::from_file(f_off.clone(), size) + MmapRegion::from_file(f_off.clone(), size)? 
} else { - MmapRegion::new(size) - } - .map_err(Error::MmapRegion)?; + MmapRegion::new(size)? + }; - Self::new(region, addr).ok_or(Error::InvalidGuestRegion) + Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) } } @@ -107,11 +107,11 @@ impl GuestRegionMmap { addr: GuestAddress, size: usize, file: Option, - ) -> result::Result { + ) -> result::Result { let range = MmapRange::new_unix(size, file, addr); - let region = MmapRegion::from_range(range).map_err(Error::MmapRegion)?; - Self::new(region, addr).ok_or(Error::InvalidGuestRegion) + let region = MmapRegion::from_range(range)?; + Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) } } @@ -171,11 +171,25 @@ impl GuestMemoryRegionBytes for GuestRegionMmap {} /// virtual address space of the calling process. pub type GuestMemoryMmap = GuestRegionCollection>; +/// Errors that can happen during [`GuestMemoryMmap::from_ranges`] and related functions. +#[derive(Debug, thiserror::Error)] +pub enum FromRangesError { + /// Error during construction of [`GuestMemoryMmap`] + #[error("Error constructing guest region collection: {0}")] + Collection(#[from] GuestRegionCollectionError), + /// Error while allocating raw mmap region + #[error("Error setting up raw memory for guest region: {0}")] + MmapRegion(#[from] MmapRegionError), + /// A combination of region length and guest address would overflow. + #[error("Combination of guest address and region length invalid (would overflow)")] + InvalidGuestRegion, +} + impl GuestMemoryMmap { /// Creates a container and allocates anonymous memory for guest memory regions. /// /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
- pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { + pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None))) } @@ -183,7 +197,7 @@ impl GuestMemoryMmap { /// /// Valid memory regions are specified as a sequence of (Address, Size, [`Option`]) /// tuples sorted by Address. - pub fn from_ranges_with_files(ranges: T) -> result::Result + pub fn from_ranges_with_files(ranges: T) -> result::Result where A: Borrow<(GuestAddress, usize, Option)>, T: IntoIterator, @@ -194,8 +208,9 @@ impl GuestMemoryMmap { .map(|x| { GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone()) }) - .collect::, Error>>()?, + .collect::, _>>()?, ) + .map_err(Into::into) } } diff --git a/src/region.rs b/src/region.rs index 8e416071..e716a629 100644 --- a/src/region.rs +++ b/src/region.rs @@ -169,18 +169,9 @@ pub trait GuestMemoryRegion: Bytes { } } -/// Errors that can occur when dealing with [`GuestRegion`]s, or collections thereof +/// Errors that can occur when dealing with [`GuestRegionCollection`]s #[derive(Debug, thiserror::Error)] -pub enum GuestRegionError { - /// Adding the guest base address to the length of the underlying mapping resulted - /// in an overflow. - #[error("Adding the guest base address to the length of the underlying mapping resulted in an overflow")] - #[cfg(feature = "backend-mmap")] - InvalidGuestRegion, - /// Error creating a `MmapRegion` object. - #[error("{0}")] - #[cfg(feature = "backend-mmap")] - MmapRegion(crate::mmap::MmapRegionError), +pub enum GuestRegionCollectionError { /// No memory region found. #[error("No memory region found")] NoMemoryRegion, @@ -230,7 +221,9 @@ impl GuestRegionCollection { /// * `regions` - The vector of regions. /// The regions shouldn't overlap, and they should be sorted /// by the starting address. 
- pub fn from_regions(mut regions: Vec) -> std::result::Result { + pub fn from_regions( + mut regions: Vec, + ) -> std::result::Result { Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) } @@ -246,9 +239,11 @@ impl GuestRegionCollection { /// * `regions` - The vector of `Arc` regions. /// The regions shouldn't overlap and they should be sorted /// by the starting address. - pub fn from_arc_regions(regions: Vec>) -> std::result::Result { + pub fn from_arc_regions( + regions: Vec>, + ) -> std::result::Result { if regions.is_empty() { - return Err(GuestRegionError::NoMemoryRegion); + return Err(GuestRegionCollectionError::NoMemoryRegion); } for window in regions.windows(2) { @@ -256,11 +251,11 @@ impl GuestRegionCollection { let next = &window[1]; if prev.start_addr() > next.start_addr() { - return Err(GuestRegionError::UnsortedMemoryRegions); + return Err(GuestRegionCollectionError::UnsortedMemoryRegions); } if prev.last_addr() >= next.start_addr() { - return Err(GuestRegionError::MemoryRegionOverlap); + return Err(GuestRegionCollectionError::MemoryRegionOverlap); } } @@ -274,7 +269,7 @@ impl GuestRegionCollection { pub fn insert_region( &self, region: Arc, - ) -> std::result::Result, GuestRegionError> { + ) -> std::result::Result, GuestRegionCollectionError> { let mut regions = self.regions.clone(); regions.push(region); regions.sort_by_key(|x| x.start_addr()); @@ -292,7 +287,7 @@ impl GuestRegionCollection { &self, base: GuestAddress, size: GuestUsize, - ) -> std::result::Result<(GuestRegionCollection, Arc), GuestRegionError> { + ) -> std::result::Result<(GuestRegionCollection, Arc), GuestRegionCollectionError> { if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { if self.regions.get(region_index).unwrap().len() == size { let mut regions = self.regions.clone(); @@ -301,7 +296,7 @@ impl GuestRegionCollection { } } - Err(GuestRegionError::NoMemoryRegion) + Err(GuestRegionCollectionError::NoMemoryRegion) } } @@ 
-479,7 +474,7 @@ impl Bytes for R { #[cfg(test)] pub(crate) mod tests { - use crate::region::{GuestMemoryRegionBytes, GuestRegionError}; + use crate::region::{GuestMemoryRegionBytes, GuestRegionCollectionError}; use crate::{ Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize, }; @@ -510,7 +505,7 @@ pub(crate) mod tests { pub(crate) type Collection = GuestRegionCollection; fn check_guest_memory_mmap( - maybe_guest_mem: Result, + maybe_guest_mem: Result, expected_regions_summary: &[(GuestAddress, u64)], ) { assert!(maybe_guest_mem.is_ok()); @@ -537,7 +532,7 @@ pub(crate) mod tests { pub(crate) fn new_guest_memory_collection_from_regions( regions_summary: &[(GuestAddress, u64)], - ) -> Result { + ) -> Result { Collection::from_regions( regions_summary .iter() @@ -548,7 +543,7 @@ pub(crate) mod tests { fn new_guest_memory_collection_from_arc_regions( regions_summary: &[(GuestAddress, u64)], - ) -> Result { + ) -> Result { Collection::from_arc_regions( regions_summary .iter() @@ -563,11 +558,11 @@ pub(crate) mod tests { assert!(matches!( new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - GuestRegionError::NoMemoryRegion + GuestRegionCollectionError::NoMemoryRegion )); assert!(matches!( new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionError::NoMemoryRegion + GuestRegionCollectionError::NoMemoryRegion )); } @@ -577,11 +572,11 @@ pub(crate) mod tests { assert!(matches!( new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - GuestRegionError::MemoryRegionOverlap + GuestRegionCollectionError::MemoryRegionOverlap )); assert!(matches!( new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionError::MemoryRegionOverlap + GuestRegionCollectionError::MemoryRegionOverlap )); } @@ -591,11 +586,11 @@ pub(crate) mod tests { assert!(matches!( new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - 
GuestRegionError::UnsortedMemoryRegions + GuestRegionCollectionError::UnsortedMemoryRegions )); assert!(matches!( new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionError::UnsortedMemoryRegions + GuestRegionCollectionError::UnsortedMemoryRegions )); } From 9f3151645915a326237d39ec5e747a005a24161c Mon Sep 17 00:00:00 2001 From: Patrick Roy Date: Tue, 1 Jul 2025 13:54:19 +0100 Subject: [PATCH 10/10] chore: update coverage Signed-off-by: Patrick Roy --- coverage_config_x86_64.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coverage_config_x86_64.json b/coverage_config_x86_64.json index 2d04cea1..13f2dfd7 100644 --- a/coverage_config_x86_64.json +++ b/coverage_config_x86_64.json @@ -1,5 +1,5 @@ { - "coverage_score": 92.28, + "coverage_score": 91.78, "exclude_path": "mmap_windows.rs", "crate_features": "backend-mmap,backend-atomic,backend-bitmap" }