Skip to content

Commit da99173

Browse files
committed
Add InterruptHandle trait, and implement the trait for mshv, kvm, and windows
Signed-off-by: Ludvig Liljenberg <4257730+ludfjig@users.noreply.github.com>
1 parent c3a9064 commit da99173

File tree

7 files changed

+231
-201
lines changed

7 files changed

+231
-201
lines changed

src/hyperlight_host/src/hypervisor/hyperv_linux.rs

Lines changed: 45 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,8 @@ extern crate mshv_bindings3 as mshv_bindings;
2525
extern crate mshv_ioctls3 as mshv_ioctls;
2626

2727
use std::fmt::{Debug, Formatter};
28+
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
29+
use std::sync::Arc;
2830

2931
use log::{error, LevelFilter};
3032
#[cfg(mshv2)]
@@ -56,10 +58,10 @@ use super::gdb::{DebugCommChannel, DebugMsg, DebugResponse, GuestDebug, MshvDebu
5658
use super::handlers::DbgMemAccessHandlerWrapper;
5759
use super::handlers::{MemAccessHandlerWrapper, OutBHandlerWrapper};
5860
use super::{
59-
Hypervisor, VirtualCPU, CR0_AM, CR0_ET, CR0_MP, CR0_NE, CR0_PE, CR0_PG, CR0_WP, CR4_OSFXSR,
60-
CR4_OSXMMEXCPT, CR4_PAE, EFER_LMA, EFER_LME, EFER_NX, EFER_SCE,
61+
Hypervisor, InterruptHandle, LinuxInterruptHandle, VirtualCPU, CR0_AM, CR0_ET, CR0_MP, CR0_NE,
62+
CR0_PE, CR0_PG, CR0_WP, CR4_OSFXSR, CR4_OSXMMEXCPT, CR4_PAE, EFER_LMA, EFER_LME, EFER_NX,
63+
EFER_SCE,
6164
};
62-
use crate::hypervisor::hypervisor_handler::HypervisorHandler;
6365
use crate::hypervisor::HyperlightExit;
6466
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
6567
use crate::mem::ptr::{GuestPtr, RawPtr};
@@ -286,13 +288,14 @@ pub(crate) fn is_hypervisor_present() -> bool {
286288

287289
/// A Hypervisor driver for HyperV-on-Linux. This hypervisor is often
288290
/// called the Microsoft Hypervisor (MSHV)
289-
pub(super) struct HypervLinuxDriver {
291+
pub(crate) struct HypervLinuxDriver {
290292
_mshv: Mshv,
291293
vm_fd: VmFd,
292294
vcpu_fd: VcpuFd,
293295
entrypoint: u64,
294296
mem_regions: Vec<MemoryRegion>,
295297
orig_rsp: GuestPtr,
298+
interrupt_handle: Arc<LinuxInterruptHandle>,
296299

297300
#[cfg(gdb)]
298301
debug: Option<MshvDebug>,
@@ -310,7 +313,7 @@ impl HypervLinuxDriver {
310313
/// `apply_registers` method to do that, or more likely call
311314
/// `initialise` to do it for you.
312315
#[instrument(skip_all, parent = Span::current(), level = "Trace")]
313-
pub(super) fn new(
316+
pub(crate) fn new(
314317
mem_regions: Vec<MemoryRegion>,
315318
entrypoint_ptr: GuestPtr,
316319
rsp_ptr: GuestPtr,
@@ -390,6 +393,11 @@ impl HypervLinuxDriver {
390393
mem_regions,
391394
entrypoint: entrypoint_ptr.absolute()?,
392395
orig_rsp: rsp_ptr,
396+
interrupt_handle: Arc::new(LinuxInterruptHandle {
397+
running: AtomicBool::new(false),
398+
tid: AtomicU64::new(unsafe { libc::pthread_self() }),
399+
dropped: AtomicBool::new(false),
400+
}),
393401

394402
#[cfg(gdb)]
395403
debug,
@@ -461,7 +469,6 @@ impl Hypervisor for HypervLinuxDriver {
461469
page_size: u32,
462470
outb_hdl: OutBHandlerWrapper,
463471
mem_access_hdl: MemAccessHandlerWrapper,
464-
hv_handler: Option<HypervisorHandler>,
465472
max_guest_log_level: Option<LevelFilter>,
466473
#[cfg(gdb)] dbg_mem_access_fn: DbgMemAccessHandlerWrapper,
467474
) -> Result<()> {
@@ -487,7 +494,6 @@ impl Hypervisor for HypervLinuxDriver {
487494

488495
VirtualCPU::run(
489496
self.as_mut_hypervisor(),
490-
hv_handler,
491497
outb_hdl,
492498
mem_access_hdl,
493499
#[cfg(gdb)]
@@ -503,7 +509,6 @@ impl Hypervisor for HypervLinuxDriver {
503509
dispatch_func_addr: RawPtr,
504510
outb_handle_fn: OutBHandlerWrapper,
505511
mem_access_fn: MemAccessHandlerWrapper,
506-
hv_handler: Option<HypervisorHandler>,
507512
#[cfg(gdb)] dbg_mem_access_fn: DbgMemAccessHandlerWrapper,
508513
) -> Result<()> {
509514
// Reset general purpose registers, then set RIP and RSP
@@ -527,7 +532,6 @@ impl Hypervisor for HypervLinuxDriver {
527532
// run
528533
VirtualCPU::run(
529534
self.as_mut_hypervisor(),
530-
hv_handler,
531535
outb_handle_fn,
532536
mem_access_fn,
533537
#[cfg(gdb)]
@@ -577,13 +581,38 @@ impl Hypervisor for HypervLinuxDriver {
577581
#[cfg(gdb)]
578582
const EXCEPTION_INTERCEPT: hv_message_type = hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT;
579583

584+
self.interrupt_handle
585+
.tid
586+
.store(unsafe { libc::pthread_self() as u64 }, Ordering::Relaxed);
587+
// Note: if an `InterruptHandle::kill()` signal is delivered to this thread **here**
588+
// - before we've set `running` to true,
589+
// Then the signal does not have any effect, because the signal handler is a no-op.
590+
self.interrupt_handle.running.store(true, Ordering::Relaxed);
591+
// Note: if an `InterruptHandle::kill()` signal is delivered to this thread **here**
592+
// - after we've set `running` to true,
593+
// - before we've called `VcpuFd::run()`
594+
// Then the individual signal is lost, because the signal is only processed after we've left userspace.
595+
// However, for this reason, we keep sending the signal again and again until we see that the atomic `running` is set to false.
580596
#[cfg(mshv2)]
581597
let run_result = {
582598
let hv_message: hv_message = Default::default();
583-
&self.vcpu_fd.run(hv_message)
599+
self.vcpu_fd.run(hv_message)
584600
};
585601
#[cfg(mshv3)]
586-
let run_result = &self.vcpu_fd.run();
602+
let run_result = self.vcpu_fd.run();
603+
// Note: if an `InterruptHandle::kill()` signal is delivered to this thread **here**
604+
// - after we've called `VcpuFd::run()`
605+
// - before we've set `running` to false
606+
// Then this is fine because the call to `VcpuFd::run()` is already finished,
607+
// the signal handler itself is a no-op, and the signals will stop being sent
608+
// once we've set the `running` to false.
609+
self.interrupt_handle
610+
.running
611+
.store(false, Ordering::Relaxed);
612+
// Note: if an `InterruptHandle::kill()` signal is delivered to this thread **here**
613+
// - after we've set `running` to false,
614+
// Then the signal does not have any effect, because the signal handler is a no-op.
615+
// This is fine since we are already done with the `VcpuFd::run()` call.
587616

588617
let result = match run_result {
589618
Ok(m) => match m.header.message_type {
@@ -678,6 +707,10 @@ impl Hypervisor for HypervLinuxDriver {
678707
self as &mut dyn Hypervisor
679708
}
680709

710+
fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
711+
self.interrupt_handle.clone()
712+
}
713+
681714
#[cfg(crashdump)]
682715
fn get_memory_regions(&self) -> &[MemoryRegion] {
683716
&self.mem_regions
@@ -732,6 +765,7 @@ impl Hypervisor for HypervLinuxDriver {
732765
impl Drop for HypervLinuxDriver {
733766
#[instrument(skip_all, parent = Span::current(), level = "Trace")]
734767
fn drop(&mut self) {
768+
self.interrupt_handle.dropped.store(true, Ordering::Relaxed);
735769
for region in &self.mem_regions {
736770
let mshv_region: mshv_user_mem_region = region.to_owned().into();
737771
match self.vm_fd.unmap_user_memory(mshv_region) {

src/hyperlight_host/src/hypervisor/hyperv_windows.rs

Lines changed: 40 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,17 @@ use core::ffi::c_void;
1818
use std::fmt;
1919
use std::fmt::{Debug, Formatter};
2020
use std::string::String;
21+
use std::sync::atomic::{AtomicBool, Ordering};
22+
use std::sync::Arc;
2123

2224
use hyperlight_common::mem::PAGE_SIZE_USIZE;
2325
use log::LevelFilter;
2426
use tracing::{instrument, Span};
2527
use windows::Win32::System::Hypervisor::{
26-
WHvX64RegisterCr0, WHvX64RegisterCr3, WHvX64RegisterCr4, WHvX64RegisterCs, WHvX64RegisterEfer,
27-
WHV_MEMORY_ACCESS_TYPE, WHV_PARTITION_HANDLE, WHV_REGISTER_VALUE, WHV_RUN_VP_EXIT_CONTEXT,
28-
WHV_RUN_VP_EXIT_REASON, WHV_X64_SEGMENT_REGISTER, WHV_X64_SEGMENT_REGISTER_0,
28+
WHvCancelRunVirtualProcessor, WHvX64RegisterCr0, WHvX64RegisterCr3, WHvX64RegisterCr4,
29+
WHvX64RegisterCs, WHvX64RegisterEfer, WHV_MEMORY_ACCESS_TYPE, WHV_PARTITION_HANDLE,
30+
WHV_REGISTER_VALUE, WHV_RUN_VP_EXIT_CONTEXT, WHV_RUN_VP_EXIT_REASON, WHV_X64_SEGMENT_REGISTER,
31+
WHV_X64_SEGMENT_REGISTER_0,
2932
};
3033

3134
use super::fpu::{FP_TAG_WORD_DEFAULT, MXCSR_DEFAULT};
@@ -37,11 +40,11 @@ use super::surrogate_process_manager::*;
3740
use super::windows_hypervisor_platform::{VMPartition, VMProcessor};
3841
use super::wrappers::{HandleWrapper, WHvFPURegisters};
3942
use super::{
40-
HyperlightExit, Hypervisor, VirtualCPU, CR0_AM, CR0_ET, CR0_MP, CR0_NE, CR0_PE, CR0_PG, CR0_WP,
41-
CR4_OSFXSR, CR4_OSXMMEXCPT, CR4_PAE, EFER_LMA, EFER_LME, EFER_NX, EFER_SCE,
43+
HyperlightExit, Hypervisor, InterruptHandle, VirtualCPU, CR0_AM, CR0_ET, CR0_MP, CR0_NE,
44+
CR0_PE, CR0_PG, CR0_WP, CR4_OSFXSR, CR4_OSXMMEXCPT, CR4_PAE, EFER_LMA, EFER_LME, EFER_NX,
45+
EFER_SCE,
4246
};
4347
use crate::hypervisor::fpu::FP_CONTROL_WORD_DEFAULT;
44-
use crate::hypervisor::hypervisor_handler::HypervisorHandler;
4548
use crate::hypervisor::wrappers::WHvGeneralRegisters;
4649
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
4750
use crate::mem::ptr::{GuestPtr, RawPtr};
@@ -56,6 +59,7 @@ pub(crate) struct HypervWindowsDriver {
5659
entrypoint: u64,
5760
orig_rsp: GuestPtr,
5861
mem_regions: Vec<MemoryRegion>,
62+
interrupt_handle: Arc<WindowsInterruptHandle>,
5963
}
6064
/* This does not automatically impl Send/Sync because the host
6165
* address of the shared memory region is a raw pointer, which are
@@ -90,6 +94,7 @@ impl HypervWindowsDriver {
9094

9195
let mut proc = VMProcessor::new(partition)?;
9296
Self::setup_initial_sregs(&mut proc, pml4_address)?;
97+
let partition_handle = proc.get_partition_hdl();
9398

9499
// subtract 2 pages for the guard pages, since when we copy memory to and from surrogate process,
95100
// we don't want to copy the guard pages themselves (that would cause access violation)
@@ -102,6 +107,11 @@ impl HypervWindowsDriver {
102107
entrypoint,
103108
orig_rsp: GuestPtr::try_from(RawPtr::from(rsp))?,
104109
mem_regions,
110+
interrupt_handle: Arc::new(WindowsInterruptHandle {
111+
running: AtomicBool::new(false),
112+
partition_handle,
113+
dropped: AtomicBool::new(false),
114+
}),
105115
})
106116
}
107117

@@ -151,11 +161,6 @@ impl HypervWindowsDriver {
151161
error.push_str(&format!("Registers: \n{:#?}", self.processor.get_regs()?));
152162
Ok(error)
153163
}
154-
155-
#[instrument(skip_all, parent = Span::current(), level = "Trace")]
156-
pub(crate) fn get_partition_hdl(&self) -> WHV_PARTITION_HANDLE {
157-
self.processor.get_partition_hdl()
158-
}
159164
}
160165

161166
impl Debug for HypervWindowsDriver {
@@ -307,7 +312,6 @@ impl Hypervisor for HypervWindowsDriver {
307312
page_size: u32,
308313
outb_hdl: OutBHandlerWrapper,
309314
mem_access_hdl: MemAccessHandlerWrapper,
310-
hv_handler: Option<HypervisorHandler>,
311315
max_guest_log_level: Option<LevelFilter>,
312316
#[cfg(gdb)] dbg_mem_access_hdl: DbgMemAccessHandlerWrapper,
313317
) -> Result<()> {
@@ -333,7 +337,6 @@ impl Hypervisor for HypervWindowsDriver {
333337

334338
VirtualCPU::run(
335339
self.as_mut_hypervisor(),
336-
hv_handler,
337340
outb_hdl,
338341
mem_access_hdl,
339342
#[cfg(gdb)]
@@ -349,7 +352,6 @@ impl Hypervisor for HypervWindowsDriver {
349352
dispatch_func_addr: RawPtr,
350353
outb_hdl: OutBHandlerWrapper,
351354
mem_access_hdl: MemAccessHandlerWrapper,
352-
hv_handler: Option<HypervisorHandler>,
353355
#[cfg(gdb)] dbg_mem_access_hdl: DbgMemAccessHandlerWrapper,
354356
) -> Result<()> {
355357
// Reset general purpose registers, then set RIP and RSP
@@ -371,7 +373,6 @@ impl Hypervisor for HypervWindowsDriver {
371373

372374
VirtualCPU::run(
373375
self.as_mut_hypervisor(),
374-
hv_handler,
375376
outb_hdl,
376377
mem_access_hdl,
377378
#[cfg(gdb)]
@@ -407,7 +408,11 @@ impl Hypervisor for HypervWindowsDriver {
407408

408409
#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
409410
fn run(&mut self) -> Result<super::HyperlightExit> {
411+
self.interrupt_handle.running.store(true, Ordering::Relaxed);
410412
let exit_context: WHV_RUN_VP_EXIT_CONTEXT = self.processor.run()?;
413+
self.interrupt_handle
414+
.running
415+
.store(false, Ordering::Relaxed);
411416

412417
let result = match exit_context.ExitReason {
413418
// WHvRunVpExitReasonX64IoPortAccess
@@ -481,8 +486,8 @@ impl Hypervisor for HypervWindowsDriver {
481486
Ok(result)
482487
}
483488

484-
fn get_partition_handle(&self) -> WHV_PARTITION_HANDLE {
485-
self.processor.get_partition_hdl()
489+
fn interrupt_handle(&self) -> Arc<dyn InterruptHandle> {
490+
self.interrupt_handle.clone()
486491
}
487492

488493
#[instrument(skip_all, parent = Span::current(), level = "Trace")]
@@ -496,28 +501,26 @@ impl Hypervisor for HypervWindowsDriver {
496501
}
497502
}
498503

499-
#[cfg(test)]
500-
pub mod tests {
501-
use std::sync::{Arc, Mutex};
504+
impl Drop for HypervWindowsDriver {
505+
fn drop(&mut self) {
506+
self.interrupt_handle.dropped.store(true, Ordering::Relaxed);
507+
}
508+
}
502509

503-
use serial_test::serial;
510+
pub struct WindowsInterruptHandle {
511+
// `WHvCancelRunVirtualProcessor()` will return Ok even if the vcpu is not running, which is the reason we need this flag.
512+
running: AtomicBool,
513+
partition_handle: WHV_PARTITION_HANDLE,
514+
dropped: AtomicBool,
515+
}
504516

505-
use crate::hypervisor::handlers::{MemAccessHandler, OutBHandler};
506-
use crate::hypervisor::tests::test_initialise;
507-
use crate::Result;
517+
impl InterruptHandle for WindowsInterruptHandle {
518+
fn kill(&self) -> bool {
519+
self.running.load(Ordering::Relaxed)
520+
&& unsafe { WHvCancelRunVirtualProcessor(self.partition_handle, 0, 0).is_ok() }
521+
}
508522

509-
#[test]
510-
#[serial]
511-
fn test_init() {
512-
let outb_handler = {
513-
let func: Box<dyn FnMut(u16, u32) -> Result<()> + Send> =
514-
Box::new(|_, _| -> Result<()> { Ok(()) });
515-
Arc::new(Mutex::new(OutBHandler::from(func)))
516-
};
517-
let mem_access_handler = {
518-
let func: Box<dyn FnMut() -> Result<()> + Send> = Box::new(|| -> Result<()> { Ok(()) });
519-
Arc::new(Mutex::new(MemAccessHandler::from(func)))
520-
};
521-
test_initialise(outb_handler, mem_access_handler).unwrap();
523+
fn dropped(&self) -> bool {
524+
self.dropped.load(Ordering::Relaxed)
522525
}
523526
}

0 commit comments

Comments
 (0)