Skip to content

Commit 2324ea1

Browse files
committed
Fix some stuff
Signed-off-by: Ludvig Liljenberg <4257730+ludfjig@users.noreply.github.com>
1 parent c379d50 commit 2324ea1

File tree

7 files changed

+129
-74
lines changed

7 files changed

+129
-74
lines changed

src/hyperlight_host/src/hypervisor/hyperv_linux.rs

Lines changed: 44 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,7 @@ pub(crate) struct HypervLinuxDriver {
307307
vcpu_fd: VcpuFd,
308308
entrypoint: u64,
309309
mem_regions: Vec<MemoryRegion>,
310+
n_initial_regions: usize,
310311
orig_rsp: GuestPtr,
311312
interrupt_handle: Arc<LinuxInterruptHandle>,
312313

@@ -417,10 +418,20 @@ impl HypervLinuxDriver {
417418
// the downside of doing this here is that the call to get_dirty_log will take longer as the number of pages increases
418419
// but for larger sandboxes it's easily cheaper than copying all the pages
419420

420-
#[cfg(mshv2)]
421-
vm_fd.get_dirty_log(base_pfn, total_size, CLEAR_DIRTY_BIT_FLAG)?;
422-
#[cfg(mshv3)]
423-
vm_fd.get_dirty_log(base_pfn, total_size, MSHV_GPAP_ACCESS_OP_CLEAR as u8)?;
421+
// Clear dirty bits for each memory region separately since they may not be contiguous
422+
for region in &mem_regions {
423+
let mshv_region: mshv_user_mem_region = region.to_owned().into();
424+
let region_size = region.guest_region.len();
425+
426+
#[cfg(mshv2)]
427+
vm_fd.get_dirty_log(mshv_region.guest_pfn, region_size, CLEAR_DIRTY_BIT_FLAG)?;
428+
#[cfg(mshv3)]
429+
vm_fd.get_dirty_log(
430+
mshv_region.guest_pfn,
431+
region_size,
432+
MSHV_GPAP_ACCESS_OP_CLEAR as u8,
433+
)?;
434+
}
424435

425436
let interrupt_handle = Arc::new(LinuxInterruptHandle {
426437
running: AtomicU64::new(0),
@@ -452,6 +463,7 @@ impl HypervLinuxDriver {
452463
page_size: 0,
453464
vm_fd,
454465
vcpu_fd,
466+
n_initial_regions: mem_regions.len(),
455467
mem_regions,
456468
entrypoint: entrypoint_ptr.absolute()?,
457469
orig_rsp: rsp_ptr,
@@ -887,7 +899,8 @@ impl Hypervisor for HypervLinuxDriver {
887899
self.interrupt_handle.clone()
888900
}
889901

890-
fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
902+
// TODO: Implement getting additional host-mapped dirty pages.
903+
fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)> {
891904
let first_mshv_region: mshv_user_mem_region = self
892905
.mem_regions
893906
.first()
@@ -896,16 +909,38 @@ impl Hypervisor for HypervLinuxDriver {
896909
))?
897910
.to_owned()
898911
.into();
899-
let total_size = self.mem_regions.iter().map(|r| r.guest_region.len()).sum();
900-
let res = self.vm_fd.get_dirty_log(
912+
913+
let n_contiguous = self
914+
.mem_regions
915+
.windows(2)
916+
.take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
917+
.count()
918+
+ 1; // +1 because windows(2) gives us n-1 pairs for n regions
919+
920+
if n_contiguous != self.n_initial_regions {
921+
return Err(new_error!(
922+
"get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
923+
self.n_initial_regions,
924+
n_contiguous
925+
));
926+
}
927+
928+
let sandbox_total_size = self
929+
.mem_regions
930+
.iter()
931+
.take(n_contiguous)
932+
.map(|r| r.guest_region.len())
933+
.sum();
934+
935+
let sandbox_dirty_pages = self.vm_fd.get_dirty_log(
901936
first_mshv_region.guest_pfn,
902-
total_size,
937+
sandbox_total_size,
903938
#[cfg(mshv2)]
904939
CLEAR_DIRTY_BIT_FLAG,
905940
#[cfg(mshv3)]
906941
(MSHV_GPAP_ACCESS_OP_CLEAR as u8),
907942
)?;
908-
Ok(res)
943+
Ok((sandbox_dirty_pages, None))
909944
}
910945

911946
#[cfg(crashdump)]

src/hyperlight_host/src/hypervisor/kvm.rs

Lines changed: 24 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -292,6 +292,7 @@ pub(crate) struct KVMDriver {
292292
entrypoint: u64,
293293
orig_rsp: GuestPtr,
294294
mem_regions: Vec<MemoryRegion>,
295+
n_initial_regions: usize,
295296
interrupt_handle: Arc<LinuxInterruptHandle>,
296297

297298
#[cfg(gdb)]
@@ -374,6 +375,7 @@ impl KVMDriver {
374375
vcpu_fd,
375376
entrypoint,
376377
orig_rsp: rsp_gp,
378+
n_initial_regions: mem_regions.len(),
377379
mem_regions,
378380
interrupt_handle: interrupt_handle.clone(),
379381
#[cfg(gdb)]
@@ -752,11 +754,27 @@ impl Hypervisor for KVMDriver {
752754
self.interrupt_handle.clone()
753755
}
754756

755-
fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>> {
757+
// TODO: Implement getting additional host-mapped dirty pages.
758+
fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)> {
759+
let n_contiguous = self
760+
.mem_regions
761+
.windows(2)
762+
.take_while(|window| window[0].guest_region.end == window[1].guest_region.start)
763+
.count()
764+
+ 1; // +1 because windows(2) gives us n-1 pairs for n regions
765+
766+
if n_contiguous != self.n_initial_regions {
767+
return Err(new_error!(
768+
"get_and_clear_dirty_pages: not all regions are contiguous, expected {} but got {}",
769+
self.n_initial_regions,
770+
n_contiguous
771+
));
772+
}
756773
let mut page_indices = vec![];
757774
let mut current_page = 0;
775+
758776
// Iterate over all memory regions and get the dirty pages for each region ignoring guard pages which cannot be dirty
759-
for (i, mem_region) in self.mem_regions.iter().enumerate() {
777+
for (i, mem_region) in self.mem_regions.iter().take(n_contiguous).enumerate() {
760778
let num_pages = mem_region.guest_region.len() / PAGE_SIZE_USIZE;
761779
let bitmap = match mem_region.flags {
762780
MemoryRegionFlags::READ => {
@@ -780,15 +798,15 @@ impl Hypervisor for KVMDriver {
780798
current_page += num_pages;
781799
}
782800

783-
// covert vec of page indices to vec of blocks
784-
let mut res = new_page_bitmap(current_page * PAGE_SIZE_USIZE, false)?;
801+
// convert vec of page indices to vec of blocks
802+
let mut sandbox_dirty_pages = new_page_bitmap(current_page * PAGE_SIZE_USIZE, false)?;
785803
for page_idx in page_indices {
786804
let block_idx = page_idx / PAGES_IN_BLOCK;
787805
let bit_idx = page_idx % PAGES_IN_BLOCK;
788-
res[block_idx] |= 1 << bit_idx;
806+
sandbox_dirty_pages[block_idx] |= 1 << bit_idx;
789807
}
790808

791-
Ok(res)
809+
Ok((sandbox_dirty_pages, None))
792810
}
793811

794812
#[cfg(crashdump)]

src/hyperlight_host/src/hypervisor/mod.rs

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,10 @@ pub(crate) trait Hypervisor: Debug + Sync + Send {
199199
/// Get dirty pages as a bitmap (Vec<u64>).
200200
/// Each bit in a u64 represents a page.
201201
/// This also clears the bitflags, marking the pages as non-dirty.
202-
fn get_and_clear_dirty_pages(&mut self) -> Result<Vec<u64>>;
202+
/// The Vec<u64> in the tuple is the bitmap of the first contiguous memory regions, which together represent the sandbox itself.
203+
/// The Vec<Vec<u64>> in the tuple contains the bitmaps of the host-mapped regions, which aren't necessarily contiguous and are not yet implemented
204+
#[allow(clippy::type_complexity)]
205+
fn get_and_clear_dirty_pages(&mut self) -> Result<(Vec<u64>, Option<Vec<Vec<u64>>>)>;
203206

204207
/// Get InterruptHandle to underlying VM
205208
fn interrupt_handle(&self) -> Arc<dyn InterruptHandle>;

src/hyperlight_host/src/mem/mgr.rs

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -286,12 +286,8 @@ where
286286
// merge the host dirty page map into the dirty bitmap
287287
let merged = bitmap_union(&res, vm_dirty_bitmap);
288288

289-
let snapshot_manager = SharedMemorySnapshotManager::new(
290-
&mut self.shared_mem,
291-
&merged,
292-
layout,
293-
self.mapped_rgns,
294-
)?;
289+
let mut snapshot_manager = SharedMemorySnapshotManager::new(&mut self.shared_mem, layout)?;
290+
snapshot_manager.create_new_snapshot(&mut self.shared_mem, &merged, self.mapped_rgns)?;
295291
existing_snapshot_manager.replace(snapshot_manager);
296292
Ok(())
297293
}
@@ -300,6 +296,10 @@ where
300296
/// off the stack
301297
/// It should be used when you want to restore the state of the memory to a previous state but still want to
302298
/// retain that state, for example after calling a function in the guest
299+
///
300+
/// Returns the number of memory regions mapped into the sandbox
301+
/// that need to be unmapped in order for the restore to be
302+
/// completed.
303303
pub(crate) fn restore_state_from_last_snapshot(&mut self, dirty_bitmap: &[u64]) -> Result<u64> {
304304
let mut snapshot_manager = self
305305
.snapshot_manager
@@ -311,7 +311,10 @@ where
311311
log_then_return!("Snapshot manager not initialized");
312312
}
313313
Some(snapshot_manager) => {
314-
snapshot_manager.restore_from_snapshot(&mut self.shared_mem, dirty_bitmap)
314+
let old_rgns = self.mapped_rgns;
315+
self.mapped_rgns =
316+
snapshot_manager.restore_from_snapshot(&mut self.shared_mem, dirty_bitmap)?;
317+
Ok(old_rgns - self.mapped_rgns)
315318
}
316319
}
317320
}

src/hyperlight_host/src/mem/shared_memory_snapshot_manager.rs

Lines changed: 35 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -45,15 +45,8 @@ impl SharedMemorySnapshotManager {
4545
#[instrument(err(Debug), skip_all, parent = Span::current(), level= "Trace")]
4646
pub(super) fn new<S: SharedMemory>(
4747
shared_mem: &mut S,
48-
dirty_page_bitmap: &[u64],
4948
layout: &SandboxMemoryLayout,
50-
mapped_rgns: u64,
5149
) -> Result<Self> {
52-
// Build a snapshot of memory from the dirty_page_map
53-
54-
let diff =
55-
Self::build_snapshot_from_dirty_page_map(shared_mem, dirty_page_bitmap, mapped_rgns)?;
56-
5750
// Get the input output buffer details from the layout so that they can be reset to their initial state
5851
let input_data_size_offset = layout.get_input_data_size_offset();
5952
let output_data_size_offset = layout.get_output_data_size_offset();
@@ -76,20 +69,20 @@ impl SharedMemorySnapshotManager {
7669
})??;
7770

7871
Ok(Self {
79-
snapshots: vec![diff],
72+
snapshots: vec![],
8073
input_data_size,
8174
output_data_size,
8275
output_data_buffer_offset,
8376
input_data_buffer_offset,
8477
})
8578
}
8679

87-
fn build_snapshot_from_dirty_page_map<S: SharedMemory>(
80+
pub(super) fn create_new_snapshot<S: SharedMemory>(
81+
&mut self,
8882
shared_mem: &mut S,
8983
dirty_page_bitmap: &[u64],
9084
mapped_rgns: u64,
91-
) -> Result<PageSnapshot> {
92-
// If there is no dirty page map, return an empty snapshot
85+
) -> Result<()> {
9386
if dirty_page_bitmap.is_empty() {
9487
return Err(new_error!(
9588
"Tried to build snapshot from empty dirty page bitmap"
@@ -143,18 +136,6 @@ impl SharedMemorySnapshotManager {
143136

144137
// Create the snapshot with the pre-allocated buffer
145138
let snapshot = PageSnapshot::with_pages_and_buffer(dirty_pages, buffer, mapped_rgns);
146-
147-
Ok(snapshot)
148-
}
149-
150-
pub(super) fn create_new_snapshot<S: SharedMemory>(
151-
&mut self,
152-
shared_mem: &mut S,
153-
dirty_page_map: &[u64],
154-
mapped_rgns: u64,
155-
) -> Result<()> {
156-
let snapshot =
157-
Self::build_snapshot_from_dirty_page_map(shared_mem, dirty_page_map, mapped_rgns)?;
158139
self.snapshots.push(snapshot);
159140
Ok(())
160141
}
@@ -383,8 +364,11 @@ mod tests {
383364

384365
// Create snapshot
385366
let mut snapshot_manager =
386-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
387-
.unwrap();
367+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
368+
369+
snapshot_manager
370+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
371+
.unwrap();
388372

389373
// Modify memory
390374
let modified_data = vec![0xBB; PAGE_SIZE_USIZE];
@@ -454,8 +438,11 @@ mod tests {
454438

455439
// Create initial snapshot (State 1)
456440
let mut snapshot_manager =
457-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
458-
.unwrap();
441+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
442+
443+
snapshot_manager
444+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
445+
.unwrap();
459446

460447
// State 2: Modify and create second snapshot
461448
let state2_data = vec![0x22; PAGE_SIZE_USIZE];
@@ -570,8 +557,10 @@ mod tests {
570557

571558
// Create snapshot
572559
let mut snapshot_manager =
573-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
574-
.unwrap();
560+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
561+
snapshot_manager
562+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
563+
.unwrap();
575564

576565
// Modify first and third pages
577566
let modified_data = [vec![0x11; PAGE_SIZE_USIZE], vec![0x22; PAGE_SIZE_USIZE]];
@@ -651,8 +640,10 @@ mod tests {
651640
}
652641

653642
let mut snapshot_manager =
654-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
655-
.unwrap();
643+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
644+
snapshot_manager
645+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
646+
.unwrap();
656647

657648
// Cycle 2: Modify and snapshot
658649
let cycle2_page0 = vec![0x02; PAGE_SIZE_USIZE];
@@ -768,13 +759,11 @@ mod tests {
768759
}
769760
}
770761

771-
let mut snapshot_manager = super::SharedMemorySnapshotManager::new(
772-
&mut shared_mem,
773-
&dirty_pages_snapshot,
774-
&layout,
775-
0,
776-
)
777-
.unwrap();
762+
let mut snapshot_manager =
763+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
764+
snapshot_manager
765+
.create_new_snapshot(&mut shared_mem, &dirty_pages_snapshot, 0)
766+
.unwrap();
778767

779768
// Modify pages in init_data area
780769
let page0_offset = init_data_offset;
@@ -921,8 +910,10 @@ mod tests {
921910

922911
// Create initial checkpoint
923912
let mut snapshot_manager =
924-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
925-
.unwrap();
913+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
914+
snapshot_manager
915+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
916+
.unwrap();
926917

927918
// Simulate function call 1: modify pages 0 and 2
928919
let func1_page0 = vec![0x10; PAGE_SIZE_USIZE];
@@ -1168,8 +1159,10 @@ mod tests {
11681159

11691160
// Create snapshot
11701161
let mut snapshot_manager =
1171-
super::SharedMemorySnapshotManager::new(&mut shared_mem, &dirty_pages, &layout, 0)
1172-
.unwrap();
1162+
super::SharedMemorySnapshotManager::new(&mut shared_mem, &layout).unwrap();
1163+
snapshot_manager
1164+
.create_new_snapshot(&mut shared_mem, &dirty_pages, 0)
1165+
.unwrap();
11731166

11741167
// Modify only the dirty pages
11751168
let modified_patterns = [

0 commit comments

Comments
 (0)