
Commit e2f4bd7

Fix some stuff
Signed-off-by: Ludvig Liljenberg <4257730+ludfjig@users.noreply.github.com>
1 parent c379d50 commit e2f4bd7


11 files changed (+182 lines, -117 lines)


src/hyperlight_host/src/hypervisor/hyperv_linux.rs

Lines changed: 46 additions & 10 deletions
@@ -307,6 +307,7 @@ pub(crate) struct HypervLinuxDriver {
     vcpu_fd: VcpuFd,
     entrypoint: u64,
     mem_regions: Vec<MemoryRegion>,
+    n_initial_regions: usize,
     orig_rsp: GuestPtr,
     interrupt_handle: Arc<LinuxInterruptHandle>,

@@ -417,10 +418,20 @@ impl HypervLinuxDriver {
         // the downside of doing this here is that the call to get_dirty_log will takes longer as the number of pages increase
         // but for larger sandboxes its easily cheaper than copying all the pages

-        #[cfg(mshv2)]
-        vm_fd.get_dirty_log(base_pfn, total_size, CLEAR_DIRTY_BIT_FLAG)?;
-        #[cfg(mshv3)]
-        vm_fd.get_dirty_log(base_pfn, total_size, MSHV_GPAP_ACCESS_OP_CLEAR as u8)?;
+        // Clear dirty bits for each memory region separately since they may not be contiguous
+        for region in &mem_regions {
+            let mshv_region: mshv_user_mem_region = region.to_owned().into();
+            let region_size = region.guest_region.len();
+
+            #[cfg(mshv2)]
+            vm_fd.get_dirty_log(mshv_region.guest_pfn, region_size, CLEAR_DIRTY_BIT_FLAG)?;
+            #[cfg(mshv3)]
+            vm_fd.get_dirty_log(
+                mshv_region.guest_pfn,
+                region_size,
+                MSHV_GPAP_ACCESS_OP_CLEAR as u8,
+            )?;
+        }

         let interrupt_handle = Arc::new(LinuxInterruptHandle {
             running: AtomicU64::new(0),

@@ -452,6 +463,7 @@ impl HypervLinuxDriver {
             page_size: 0,
             vm_fd,
             vcpu_fd,
+            n_initial_regions: mem_regions.len(),
             mem_regions,
             entrypoint: entrypoint_ptr.absolute()?,
             orig_rsp: rsp_ptr,

@@ -887,7 +899,8 @@ impl Hypervisor for HypervLinuxDriver {
         self.interrupt_handle.clone()
     }

-    fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
+    // TODO: Implement getting additional host-mapped dirty pages.
+    fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)> {
         let first_mshv_region: mshv_user_mem_region = self
             .mem_regions
             .first()

@@ -896,16 +909,38 @@ impl Hypervisor for HypervLinuxDriver {
             ))?
             .to_owned()
             .into();
-        let total_size = self.mem_regions.iter().map(|r| r.guest_region.len()).sum();
-        let res = self.vm_fd.get_dirty_log(
+
+        let n_contiguous = self
+            .mem_regions
+            .windows(2)
+            .take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
+            .count()
+            + 1; // +1 because windows(2) gives us n-1 pairs for n regions
+
+        if n_contiguous != self.n_initial_regions {
+            return Err(new_error!(
+                "get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
+                self.n_initial_regions,
+                n_contiguous
+            ));
+        }
+
+        let sandbox_total_size = self
+            .mem_regions
+            .iter()
+            .take(n_contiguous)
+            .map(|r| r.guest_region.len())
+            .sum();
+
+        let sandbox_dirty_pages = self.vm_fd.get_dirty_log(
             first_mshv_region.guest_pfn,
-            total_size,
+            sandbox_total_size,
             #[cfg(mshv2)]
             CLEAR_DIRTY_BIT_FLAG,
             #[cfg(mshv3)]
             (MSHV_GPAP_ACCESS_OP_CLEAR as u8),
         )?;
-        Ok(res)
+        Ok((sandbox_dirty_pages, None))
     }

     #[cfg(crashdump)]

@@ -1158,7 +1193,8 @@ mod tests {
             return;
         }
         const MEM_SIZE: usize = 0x3000;
-        let gm = shared_mem_with_code(CODE.as_slice(), MEM_SIZE, 0).unwrap();
+        let mut gm = shared_mem_with_code(CODE.as_slice(), MEM_SIZE, 0).unwrap();
+        gm.stop_tracking_dirty_pages().unwrap();
         let rsp_ptr = GuestPtr::try_from(0).unwrap();
         let pml4_ptr = GuestPtr::try_from(0).unwrap();
         let entrypoint_ptr = GuestPtr::try_from(0).unwrap();

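Both Linux drivers (mshv above, KVM below) gate this path on the same contiguity check: `windows(2)` walks adjacent region pairs, `take_while` stops at the first gap, and the `+ 1` converts the pair count back to a region count. The following is a minimal standalone sketch of that idiom, assuming regions are plain `Range<usize>` values rather than the crate's `MemoryRegion` type; it also guards the empty case, which the drivers never hit because a sandbox always has at least one region.

    use std::ops::Range;

    // Count how many leading regions form one contiguous run: walk adjacent
    // pairs, stop at the first gap, then add 1 because windows(2) yields
    // n-1 pairs for n regions.
    fn count_contiguous(regions: &[Range<usize>]) -> usize {
        if regions.is_empty() {
            return 0;
        }
        regions
            .windows(2)
            .take_while(|w| w[0].end == w[1].start)
            .count()
            + 1
    }

    fn main() {
        // Three contiguous regions followed by a gap: only the first three count.
        let regions = [0..0x1000, 0x1000..0x3000, 0x3000..0x4000, 0x10000..0x11000];
        assert_eq!(count_contiguous(&regions), 3);
    }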
src/hyperlight_host/src/hypervisor/hyperv_windows.rs

Lines changed: 5 additions & 3 deletions
@@ -607,19 +607,21 @@ impl Hypervisor for HypervWindowsDriver {
         Ok(())
     }

-    fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
+    fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)> {
         // For now we just mark all pages dirty which is the equivalent of taking a full snapshot
         let total_size = self.mem_regions.iter().map(|r| r.guest_region.len()).sum();
-        new_page_bitmap(total_size, true)
+        Ok((new_page_bitmap(total_size, true)?, None))
     }

     #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
     unsafe fn map_region(&mut self, _rgn: &MemoryRegion) -> Result<()> {
-        log_then_return!("Mapping host memory into the guest not yet supported on this platform");
+        // TODO: when adding support, also update `get_and_clear_dirty_pages`, see kvm/mshv for details
+        log_then_return!("Mapping host memory into the guest not yet supported on this platform.");
     }

     #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
     unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
+        // TODO: when adding support, also update `get_and_clear_dirty_pages`, see kvm/mshv for details
         if n > 0 {
             log_then_return!(
                 "Mapping host memory into the guest not yet supported on this platform"

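On Windows the driver simply reports every page as dirty via `new_page_bitmap(total_size, true)`, which is equivalent to taking a full snapshot. The helper's internals are not shown in this diff, so the sketch below is only an assumption of what an "all pages dirty" bitmap looks like with 4 KiB pages packed 64 to a `u64` word; the names `PAGE_SIZE`, `PAGES_IN_BLOCK`, and `all_dirty_bitmap` are illustrative.

    const PAGE_SIZE: usize = 0x1000; // assumed 4 KiB pages
    const PAGES_IN_BLOCK: usize = 64; // one u64 word holds 64 page bits

    // Build a page bitmap covering `size` bytes, optionally with every bit set.
    // Mirrors what new_page_bitmap(total_size, true) is assumed to produce.
    fn all_dirty_bitmap(size: usize, set_all: bool) -> Vec<u64> {
        let num_pages = size.div_ceil(PAGE_SIZE);
        let num_blocks = num_pages.div_ceil(PAGES_IN_BLOCK);
        let mut bitmap = vec![0u64; num_blocks];
        if set_all {
            for (i, block) in bitmap.iter_mut().enumerate() {
                // Mask off bits past the last page in the final block.
                let pages_in_this_block = (num_pages - i * PAGES_IN_BLOCK).min(PAGES_IN_BLOCK);
                *block = if pages_in_this_block == PAGES_IN_BLOCK {
                    u64::MAX
                } else {
                    (1u64 << pages_in_this_block) - 1
                };
            }
        }
        bitmap
    }

    fn main() {
        // 0x3000 bytes = 3 pages -> low 3 bits set in a single u64 block.
        assert_eq!(all_dirty_bitmap(0x3000, true), vec![0b111]);
    }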
src/hyperlight_host/src/hypervisor/kvm.rs

Lines changed: 24 additions & 6 deletions
@@ -292,6 +292,7 @@ pub(crate) struct KVMDriver {
     entrypoint: u64,
     orig_rsp: GuestPtr,
     mem_regions: Vec<MemoryRegion>,
+    n_initial_regions: usize,
     interrupt_handle: Arc<LinuxInterruptHandle>,

     #[cfg(gdb)]

@@ -374,6 +375,7 @@ impl KVMDriver {
             vcpu_fd,
             entrypoint,
             orig_rsp: rsp_gp,
+            n_initial_regions: mem_regions.len(),
             mem_regions,
             interrupt_handle: interrupt_handle.clone(),
             #[cfg(gdb)]

@@ -752,11 +754,27 @@ impl Hypervisor for KVMDriver {
         self.interrupt_handle.clone()
     }

-    fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
+    // TODO: Implement getting additional host-mapped dirty pages.
+    fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)> {
+        let n_contiguous = self
+            .mem_regions
+            .windows(2)
+            .take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
+            .count()
+            + 1; // +1 because windows(2) gives us n-1 pairs for n regions
+
+        if n_contiguous != self.n_initial_regions {
+            return Err(new_error!(
+                "get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
+                self.n_initial_regions,
+                n_contiguous
+            ));
+        }
         let mut page_indices = vec![];
         let mut current_page = 0;
+
         // Iterate over all memory regions and get the dirty pages for each region ignoring guard pages which cannot be dirty
-        for (i, mem_region) in self.mem_regions.iter().enumerate() {
+        for (i, mem_region) in self.mem_regions.iter().take(n_contiguous).enumerate() {
             let num_pages = mem_region.guest_region.len() / PAGE_SIZE_USIZE;
             let bitmap = match mem_region.flags {
                 MemoryRegionFlags::READ => {

@@ -780,15 +798,15 @@ impl Hypervisor for KVMDriver {
             current_page += num_pages;
         }

-        // covert vec of page indices to vec of blocks
-        let mut res = new_page_bitmap(current_page * PAGE_SIZE_USIZE, false)?;
+        // convert vec of page indices to vec of blocks
+        let mut sandbox_dirty_pages = new_page_bitmap(current_page * PAGE_SIZE_USIZE, false)?;
         for page_idx in page_indices {
             let block_idx = page_idx / PAGES_IN_BLOCK;
             let bit_idx = page_idx % PAGES_IN_BLOCK;
-            res[block_idx] |= 1 << bit_idx;
+            sandbox_dirty_pages[block_idx] |= 1 << bit_idx;
         }

-        Ok(res)
+        Ok((sandbox_dirty_pages, None))
     }

     #[cfg(crashdump)]

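After walking the contiguous regions, the KVM driver packs the collected page indices into the block bitmap using the `block_idx`/`bit_idx` arithmetic shown above. A self-contained sketch of that packing step, assuming `PAGES_IN_BLOCK` is 64 (one bit per page in each `u64` word):

    const PAGES_IN_BLOCK: usize = 64; // assumed: each u64 word tracks 64 pages

    // Pack a list of dirty page indices into a block bitmap, as the KVM
    // driver does after walking every contiguous memory region.
    fn pack_dirty_pages(total_pages: usize, page_indices: &[usize]) -> Vec<u64> {
        let mut bitmap = vec![0u64; total_pages.div_ceil(PAGES_IN_BLOCK)];
        for &page_idx in page_indices {
            let block_idx = page_idx / PAGES_IN_BLOCK;
            let bit_idx = page_idx % PAGES_IN_BLOCK;
            bitmap[block_idx] |= 1 << bit_idx;
        }
        bitmap
    }

    fn main() {
        // Pages 0, 3 and 64 dirty out of 128 -> bits 0 and 3 of word 0, bit 0 of word 1.
        let bitmap = pack_dirty_pages(128, &[0, 3, 64]);
        assert_eq!(bitmap, vec![0b1001, 0b1]);
    }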
src/hyperlight_host/src/hypervisor/mod.rs

Lines changed: 4 additions & 1 deletion
@@ -199,7 +199,10 @@ pub(crate) trait Hypervisor: Debug + Sync + Send {
     /// Get dirty pages as a bitmap (Vec<u64>).
     /// Each bit in a u64 represents a page.
     /// This also clears the bitflags, marking the pages as non-dirty.
-    fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>>;
+    /// The Vec<u64> in the tuple is the bitmap of the first contiguous memory regions, which represents the sandbox itself.
+    /// The Vec<Vec<u64>> in the tuple are the host-mapped regions, which aren't necessarily contiguous, and not yet implemented
+    #[allow(clippy::type_complexity)]
+    fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)>;

     /// Get InterruptHandle to underlying VM
     fn interrupt_handle(&self) -> Arc<dyn InterruptHandle>;

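Callers of the trait now receive a tuple instead of a bare bitmap. A hypothetical helper (not part of this commit) showing how the two elements are meant to be split; the second element stays `None` until host-mapped tracking lands:

    type DirtyPages = (Vec<u64>, Option<Vec<Vec<u64>>>);

    // Hypothetical call-site helper: the first element is the sandbox bitmap,
    // the second is reserved for host-mapped regions and is currently always None.
    fn split_dirty_pages(result: DirtyPages) -> Vec<u64> {
        let (sandbox_dirty_pages, host_mapped_dirty_pages) = result;
        debug_assert!(host_mapped_dirty_pages.is_none(), "not implemented yet");
        sandbox_dirty_pages
    }

    fn main() {
        let from_driver: DirtyPages = (vec![0b1010], None);
        assert_eq!(split_dirty_pages(from_driver), vec![0b1010]);
    }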
src/hyperlight_host/src/mem/memory_region.rs

Lines changed: 1 addition & 2 deletions
@@ -30,9 +30,8 @@ use bitflags::bitflags;
 #[cfg(mshv)]
 use hyperlight_common::mem::PAGE_SHIFT;
 use hyperlight_common::mem::PAGE_SIZE_USIZE;
-use kvm_bindings::KVM_MEM_LOG_DIRTY_PAGES;
 #[cfg(kvm)]
-use kvm_bindings::{KVM_MEM_READONLY, kvm_userspace_memory_region};
+use kvm_bindings::{KVM_MEM_LOG_DIRTY_PAGES, KVM_MEM_READONLY, kvm_userspace_memory_region};
 #[cfg(mshv2)]
 use mshv_bindings::{
     HV_MAP_GPA_EXECUTABLE, HV_MAP_GPA_PERMISSIONS_NONE, HV_MAP_GPA_READABLE, HV_MAP_GPA_WRITABLE,

src/hyperlight_host/src/mem/mgr.rs

Lines changed: 10 additions & 7 deletions
@@ -286,12 +286,8 @@ where
         // merge the host dirty page map into the dirty bitmap
         let merged = bitmap_union(&res, vm_dirty_bitmap);

-        let snapshot_manager = SharedMemorySnapshotManager::new(
-            &mut self.shared_mem,
-            &merged,
-            layout,
-            self.mapped_rgns,
-        )?;
+        let mut snapshot_manager = SharedMemorySnapshotManager::new(&mut self.shared_mem, layout)?;
+        snapshot_manager.create_new_snapshot(&mut self.shared_mem, &merged, self.mapped_rgns)?;
         existing_snapshot_manager.replace(snapshot_manager);
         Ok(())
     }

@@ -300,6 +296,10 @@ where
     /// off the stack
     /// It should be used when you want to restore the state of the memory to a previous state but still want to
     /// retain that state, for example after calling a function in the guest
+    ///
+    /// Returns the number of memory regions mapped into the sandbox
+    /// that need to be unmapped in order for the restore to be
+    /// completed.
     pub(crate) fn restore_state_from_last_snapshot(&mut self, dirty_bitmap: &[u64]) -> Result<u64> {
         let mut snapshot_manager = self
             .snapshot_manager

@@ -311,7 +311,10 @@ where
                 log_then_return!("Snapshot manager not initialized");
             }
             Some(snapshot_manager) => {
-                snapshot_manager.restore_from_snapshot(&mut self.shared_mem, dirty_bitmap)
+                let old_rgns = self.mapped_rgns;
+                self.mapped_rgns =
+                    snapshot_manager.restore_from_snapshot(&mut self.shared_mem, dirty_bitmap)?;
+                Ok(old_rgns - self.mapped_rgns)
             }
         }
     }

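The new return value of `restore_state_from_last_snapshot` is the difference between the regions mapped before the restore and the regions recorded in the snapshot, i.e. how many regions the caller must unmap to complete the restore. A minimal sketch of that bookkeeping with an illustrative `Manager` type; only the `mapped_rgns` name comes from the diff:

    // Minimal sketch of the bookkeeping in restore_state_from_last_snapshot:
    // the snapshot records how many regions were mapped when it was taken, and
    // restoring returns how many currently mapped regions must be unmapped.
    struct Manager {
        mapped_rgns: u64,
    }

    impl Manager {
        fn restore_to(&mut self, snapshot_mapped_rgns: u64) -> u64 {
            let old_rgns = self.mapped_rgns;
            self.mapped_rgns = snapshot_mapped_rgns;
            // Regions mapped after the snapshot was taken are the caller's to unmap.
            old_rgns - self.mapped_rgns
        }
    }

    fn main() {
        // Snapshot taken with 2 mapped regions; 3 more were mapped afterwards.
        let mut mgr = Manager { mapped_rgns: 5 };
        assert_eq!(mgr.restore_to(2), 3);
    }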
src/hyperlight_host/src/mem/shared_mem.rs

Lines changed: 21 additions & 14 deletions
@@ -524,21 +524,28 @@ impl ExclusiveSharedMemory {
             log_then_return!(WindowsAPIError(e.clone()));
         }

+        // HostMapping is only non-Send/Sync because raw pointers
+        // are not ("as a lint", as the Rust docs say). We don't
+        // want to mark HostMapping Send/Sync immediately, because
+        // that could socially imply that it's "safe" to use
+        // unsafe accesses from multiple threads at once. Instead, we
+        // directly impl Send and Sync on this type. Since this
+        // type does have Send and Sync manually impl'd, the Arc
+        // is not pointless as the lint suggests.
+        #[allow(clippy::arc_with_non_send_sync)]
+        let host_mapping = Arc::new(HostMapping {
+            ptr: addr.Value as *mut u8,
+            size: total_size,
+            handle,
+        });
+
+        let dirty_page_tracker = Arc::new(Mutex::new(Some(DirtyPageTracker::new(Arc::clone(
+            &host_mapping,
+        ))?)));
+
         Ok(Self {
-            // HostMapping is only non-Send/Sync because raw pointers
-            // are not ("as a lint", as the Rust docs say). We don't
-            // want to mark HostMapping Send/Sync immediately, because
-            // that could socially imply that it's "safe" to use
-            // unsafe accesses from multiple threads at once. Instead, we
-            // directly impl Send and Sync on this type. Since this
-            // type does have Send and Sync manually impl'd, the Arc
-            // is not pointless as the lint suggests.
-            #[allow(clippy::arc_with_non_send_sync)]
-            region: Arc::new(HostMapping {
-                ptr: addr.Value as *mut u8,
-                size: total_size,
-                handle,
-            }),
+            region: host_mapping,
+            signal_dirty_bitmap_tracker: dirty_page_tracker,
         })
     }

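The restructured constructor binds the `HostMapping` to a named `Arc` first so that both the memory object and the new `DirtyPageTracker` can share it, and the tracker is stored behind `Arc<Mutex<Option<...>>>` so it can later be removed (the updated test calls `gm.stop_tracking_dirty_pages()`). A sketch of that sharing pattern with placeholder types; the real `DirtyPageTracker::new` returns a `Result` and performs signal-based tracking:

    use std::sync::{Arc, Mutex};

    // Placeholder stand-ins for HostMapping and DirtyPageTracker; the real types
    // live in shared_mem.rs and wrap a raw mapping plus signal-based tracking.
    struct HostMapping {
        size: usize,
    }

    struct DirtyPageTracker {
        mapping: Arc<HostMapping>,
    }

    impl DirtyPageTracker {
        fn new(mapping: Arc<HostMapping>) -> Self {
            Self { mapping }
        }
    }

    fn main() {
        // Bind the mapping to a named Arc first so both the memory object and the
        // tracker can hold a reference, mirroring the restructured constructor.
        let host_mapping = Arc::new(HostMapping { size: 0x4000 });
        let tracker = Arc::new(Mutex::new(Some(DirtyPageTracker::new(Arc::clone(
            &host_mapping,
        )))));

        // Stopping tracking later just takes the tracker out of the Option.
        let taken = tracker.lock().unwrap().take().expect("tracker was present");
        assert_eq!(taken.mapping.size, 0x4000); // the tracker still shares the mapping
    }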