diff --git a/Cargo.lock b/Cargo.lock index 90a74e11e..fb750dd8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1861,8 +1861,7 @@ dependencies = [ [[package]] name = "kvm-bindings" version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b3c06ff73c7ce03e780887ec2389d62d2a2a9ddf471ab05c2ff69207cd3f3b4" +source = "git+https://github.com/rust-vmm/kvm?rev=3ffc9b62af5978553f73cc0ec79fad13fdd47146#3ffc9b62af5978553f73cc0ec79fad13fdd47146" dependencies = [ "vmm-sys-util", ] @@ -1870,8 +1869,7 @@ dependencies = [ [[package]] name = "kvm-ioctls" version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "333f77a20344a448f3f70664918135fddeb804e938f28a99d685bd92926e0b19" +source = "git+https://github.com/rust-vmm/kvm?rev=3ffc9b62af5978553f73cc0ec79fad13fdd47146#3ffc9b62af5978553f73cc0ec79fad13fdd47146" dependencies = [ "bitflags 2.10.0", "kvm-bindings", diff --git a/src/hyperlight_host/Cargo.toml b/src/hyperlight_host/Cargo.toml index 26de861d9..9efd6071c 100644 --- a/src/hyperlight_host/Cargo.toml +++ b/src/hyperlight_host/Cargo.toml @@ -75,8 +75,8 @@ windows-version = "0.1" lazy_static = "1.4.0" [target.'cfg(unix)'.dependencies] -kvm-bindings = { version = "0.14", features = ["fam-wrappers"], optional = true } -kvm-ioctls = { version = "0.24", optional = true } +kvm-bindings = { git = "https://github.com/rust-vmm/kvm", rev = "3ffc9b62af5978553f73cc0ec79fad13fdd47146", features = ["fam-wrappers"], optional = true } +kvm-ioctls = { git = "https://github.com/rust-vmm/kvm", rev = "3ffc9b62af5978553f73cc0ec79fad13fdd47146", optional = true } mshv-bindings = { version = "0.6", optional = true } mshv-ioctls = { version = "0.6", optional = true} diff --git a/src/hyperlight_host/src/error.rs b/src/hyperlight_host/src/error.rs index 1bca9e944..733f36776 100644 --- a/src/hyperlight_host/src/error.rs +++ b/src/hyperlight_host/src/error.rs @@ -144,6 +144,14 @@ pub enum HyperlightError { #[error("Memory Access Violation at address {0:#x} of type {1}, but memory is marked as {2}")] MemoryAccessViolation(u64, MemoryRegionFlags, MemoryRegionFlags), + /// MSR Read Violation. Guest attempted to read from a Model-Specific Register + #[error("Guest attempted to read from MSR {0:#x}")] + MsrReadViolation(u32), + + /// MSR Write Violation. Guest attempted to write to a Model-Specific Register + #[error("Guest attempted to write {1:#x} to MSR {0:#x}")] + MsrWriteViolation(u32, u64), + /// Memory Allocation Failed. 
#[error("Memory Allocation Failed with OS Error {0:?}.")] MemoryAllocationFailed(Option), @@ -325,6 +333,8 @@ impl HyperlightError { | HyperlightError::ExecutionAccessViolation(_) | HyperlightError::StackOverflow() | HyperlightError::MemoryAccessViolation(_, _, _) + | HyperlightError::MsrReadViolation(_) + | HyperlightError::MsrWriteViolation(_, _) | HyperlightError::SnapshotSizeMismatch(_, _) | HyperlightError::MemoryRegionSizeMismatch(_, _, _) // HyperlightVmError::Restore is already handled manually in restore(), but we mark it diff --git a/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs b/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs index f8285d425..49856eafb 100644 --- a/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs +++ b/src/hyperlight_host/src/hypervisor/hyperlight_vm.rs @@ -155,6 +155,14 @@ impl DispatchGuestCallError { region_flags, }) => HyperlightError::MemoryAccessViolation(addr, access_type, region_flags), + DispatchGuestCallError::Run(RunVmError::MsrReadViolation(msr_index)) => { + HyperlightError::MsrReadViolation(msr_index) + } + + DispatchGuestCallError::Run(RunVmError::MsrWriteViolation { msr_index, value }) => { + HyperlightError::MsrWriteViolation(msr_index, value) + } + // Leave others as is other => HyperlightVmError::DispatchGuestCall(other).into(), }; @@ -203,6 +211,10 @@ pub enum RunVmError { MmioReadUnmapped(u64), #[error("MMIO WRITE access to unmapped address {0:#x}")] MmioWriteUnmapped(u64), + #[error("Guest attempted to read from MSR {0:#x}")] + MsrReadViolation(u32), + #[error("Guest attempted to write {value:#x} to MSR {msr_index:#x}")] + MsrWriteViolation { msr_index: u32, value: u64 }, #[error("vCPU run failed: {0}")] RunVcpu(#[from] RunVcpuError), #[error("Unexpected VM exit: {0}")] @@ -340,7 +352,7 @@ impl HyperlightVm { _pml4_addr: u64, entrypoint: Option, rsp_gva: u64, - #[cfg_attr(target_os = "windows", allow(unused_variables))] config: &SandboxConfiguration, + config: &SandboxConfiguration, #[cfg(gdb)] gdb_conn: Option>, #[cfg(crashdump)] rt_cfg: SandboxRuntimeConfig, #[cfg(feature = "mem_profile")] trace_info: MemTraceInfo, @@ -350,7 +362,7 @@ impl HyperlightVm { #[cfg(not(gdb))] type VmType = Box; - let vm: VmType = match get_available_hypervisor() { + let mut vm: VmType = match get_available_hypervisor() { #[cfg(kvm)] Some(HypervisorType::Kvm) => Box::new(KvmVm::new().map_err(VmError::CreateVm)?), #[cfg(mshv3)] @@ -360,6 +372,11 @@ impl HyperlightVm { None => return Err(CreateHyperlightVmError::NoHypervisorFound), }; + // Enable MSR intercepts unless the user explicitly allows MSR access + if !config.get_allow_msr() { + vm.enable_msr_intercept().map_err(VmError::CreateVm)?; + } + #[cfg(feature = "init-paging")] vm.set_sregs(&CommonSpecialRegisters::standard_64bit_defaults(_pml4_addr)) .map_err(VmError::Register)?; @@ -811,6 +828,12 @@ impl HyperlightVm { } } } + Ok(VmExit::MsrRead(msr_index)) => { + break Err(RunVmError::MsrReadViolation(msr_index)); + } + Ok(VmExit::MsrWrite { msr_index, value }) => { + break Err(RunVmError::MsrWriteViolation { msr_index, value }); + } Ok(VmExit::Cancelled()) => { // If cancellation was not requested for this specific guest function call, // the vcpu was interrupted by a stale cancellation. 
This can occur when: @@ -906,6 +929,7 @@ impl HyperlightVm { } /// Resets the following vCPU state: + /// - MSRs (see [`VIRTUALIZED_MSRS`](super::virtual_machine::VIRTUALIZED_MSRS)) /// - General purpose registers /// - Debug registers /// - XSAVE (includes FPU/SSE state with proper FCW and MXCSR defaults) @@ -916,6 +940,8 @@ impl HyperlightVm { cr3: u64, sregs: &CommonSpecialRegisters, ) -> std::result::Result<(), RegisterError> { + self.vm.reset_msrs()?; + self.vm.set_regs(&CommonRegisters { rflags: 1 << 1, // Reserved bit always set ..Default::default() diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs index ecf3b6b91..65012d7cc 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/kvm.rs @@ -19,10 +19,14 @@ use std::sync::LazyLock; #[cfg(gdb)] use kvm_bindings::kvm_guest_debug; use kvm_bindings::{ - kvm_debugregs, kvm_fpu, kvm_regs, kvm_sregs, kvm_userspace_memory_region, kvm_xsave, + kvm_debugregs, kvm_enable_cap, kvm_fpu, kvm_regs, kvm_sregs, kvm_userspace_memory_region, + kvm_xsave, }; use kvm_ioctls::Cap::UserMemory; -use kvm_ioctls::{Kvm, VcpuExit, VcpuFd, VmFd}; +use kvm_ioctls::{ + Cap, Kvm, MsrExitReason, MsrFilterDefaultAction, MsrFilterRange, MsrFilterRangeFlags, VcpuExit, + VcpuFd, VmFd, +}; use tracing::{Span, instrument}; #[cfg(feature = "trace_guest")] use tracing_opentelemetry::OpenTelemetrySpanExt; @@ -139,6 +143,36 @@ impl KvmVm { } impl VirtualMachine for KvmVm { + fn enable_msr_intercept(&mut self) -> std::result::Result<(), CreateVmError> { + let cap = kvm_enable_cap { + cap: Cap::X86UserSpaceMsr as u32, + args: [MsrExitReason::Filter.bits() as u64, 0, 0, 0], + ..Default::default() + }; + self.vm_fd + .enable_cap(&cap) + .map_err(|e| CreateVmError::EnableMsrIntercept(e.into()))?; + + // Install a deny-all MSR filter (KVM_X86_SET_MSR_FILTER). + // At least one range is required when using KVM_MSR_FILTER_DEFAULT_DENY; + // from the docs: "Calling this ioctl with an empty set of ranges + // (all nmsrs == 0) disables MSR filtering. In that mode, + // KVM_MSR_FILTER_DEFAULT_DENY is invalid and causes an error." + let bitmap = [0u8; 1]; // 1 byte covers 8 MSRs, all bits 0 (deny) + self.vm_fd + .set_msr_filter( + MsrFilterDefaultAction::DENY, + &[MsrFilterRange { + flags: MsrFilterRangeFlags::READ | MsrFilterRangeFlags::WRITE, + base: 0, + msr_count: 1, + bitmap: &bitmap, + }], + ) + .map_err(|e| CreateVmError::EnableMsrIntercept(e.into()))?; + Ok(()) + } + unsafe fn map_memory( &mut self, (slot, region): (u32, &MemoryRegion), @@ -176,6 +210,40 @@ impl VirtualMachine for KvmVm { Ok(VcpuExit::IoOut(port, data)) => Ok(VmExit::IoOut(port, data.to_vec())), Ok(VcpuExit::MmioRead(addr, _)) => Ok(VmExit::MmioRead(addr)), Ok(VcpuExit::MmioWrite(addr, _)) => Ok(VmExit::MmioWrite(addr)), + // KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR (KVM API §5, kvm_run structure): + // + // The "index" field tells userspace which MSR the guest wants to + // read/write. If the request was unsuccessful, userspace indicates + // that with a "1" in the "error" field. "This will inject a #GP + // into the guest when the VCPU is executed again." + // + // "for KVM_EXIT_IO, KVM_EXIT_MMIO, [...] KVM_EXIT_X86_RDMSR and + // KVM_EXIT_X86_WRMSR the corresponding operations are complete + // (and guest state is consistent) only after userspace has + // re-entered the kernel with KVM_RUN." 
+ // + // We set error=1 and then re-run with `immediate_exit` to let KVM + // inject the #GP without executing further guest code. From the + // kvm_run docs: "[immediate_exit] is polled once when KVM_RUN + // starts; if non-zero, KVM_RUN exits immediately, returning + // -EINTR." + Ok(VcpuExit::X86Rdmsr(msr_exit)) => { + let msr_index = msr_exit.index; + *msr_exit.error = 1; + self.vcpu_fd.set_kvm_immediate_exit(1); + let _ = self.vcpu_fd.run(); + self.vcpu_fd.set_kvm_immediate_exit(0); + Ok(VmExit::MsrRead(msr_index)) + } + Ok(VcpuExit::X86Wrmsr(msr_exit)) => { + let msr_index = msr_exit.index; + let value = msr_exit.data; + *msr_exit.error = 1; + self.vcpu_fd.set_kvm_immediate_exit(1); + let _ = self.vcpu_fd.run(); + self.vcpu_fd.set_kvm_immediate_exit(0); + Ok(VmExit::MsrWrite { msr_index, value }) + } #[cfg(gdb)] Ok(VcpuExit::Debug(debug_exit)) => Ok(VmExit::Debug { dr6: debug_exit.dr6, @@ -327,6 +395,13 @@ impl VirtualMachine for KvmVm { Ok(()) } + + fn reset_msrs(&self) -> std::result::Result<(), RegisterError> { + // The KVM MSR filter (KVM_MSR_FILTER_DEFAULT_DENY) blocks all guest + // MSR access at the hardware level, so no MSRs can be modified by the + // guest and there is nothing to reset. + Ok(()) + } } #[cfg(gdb)] diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/mod.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/mod.rs index 82e05c104..1a002bc87 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/mod.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/mod.rs @@ -100,12 +100,221 @@ pub(crate) enum HypervisorType { Whp, } +/// Architectural default value for PAT (IA32_PAT, MSR 0x277). +/// Each byte encodes a memory type for one of the 8 PAT entries: +/// PA0=WB(6), PA1=WT(4), PA2=UC-(7), PA3=UC(0), +/// PA4=WB(6), PA5=WT(4), PA6=UC-(7), PA7=UC(0). +pub(crate) const PAT_RESET_VALUE: u64 = 0x0007_0406_0007_0406; + +/// MSRs that the Microsoft hypervisor virtualizes internally and handles +/// without generating a VM exit — even when MSR intercepts are enabled via +/// `HV_INTERCEPT_TYPE_X64_MSR` (MSHV) or `WHvExtendedVmExitX64MsrExit` (WHP). +/// +/// Both [`MSRS_TO_RESET`] and the MSR intercept tests are derived from this +/// list. Any new virtualized MSR must be added here. +/// +/// Some MSRs in this list do NOT appear in [`MSRS_TO_RESET`] because they are +/// either read-only or already reset by other means (e.g. `set_sregs()`). +/// +/// Each entry is `(msr_index, reset_value, needs_explicit_reset)`: +/// - `msr_index`: The architectural MSR index (same value used in RDMSR/WRMSR). +/// - `reset_value`: The value to write on reset (only meaningful when +/// `needs_explicit_reset` is true). +/// - `needs_explicit_reset`: `false` for MSRs that are read-only or already +/// restored via `set_sregs()` / `set_regs()`. +#[rustfmt::skip] +pub(crate) const VIRTUALIZED_MSRS: &[(u32, u64, bool)] = &[ + // ── Already handled by set_sregs() or read-only ───────────────── + (0x1B, 0, false), // APIC_BASE (set_sregs) + (0xFE, 0, false), // MTRR_CAP (read-only) + (0xC000_0080, 0, false), // EFER (set_sregs) + (0xC000_0100, 0, false), // FS_BASE (set_sregs) + (0xC000_0101, 0, false), // GS_BASE (set_sregs) + + // ── Must be explicitly reset ──────────────────────────────────── + // TSC: a guest WRMSR 0x10 adjusts the hypervisor's TSC offset, which + // persists across sandbox executions. Reset to 0 to clear it. 
+ (0x10, 0, true), // TSC + // SYSCALL MSRs + (0xC000_0081, 0, true), // STAR + (0xC000_0082, 0, true), // LSTAR + (0xC000_0083, 0, true), // CSTAR + (0xC000_0084, 0, true), // SFMASK + // Kernel GS base (SWAPGS) + (0xC000_0102, 0, true), // KERNEL_GS_BASE + // SYSENTER MSRs + (0x174, 0, true), // SYSENTER_CS + (0x175, 0, true), // SYSENTER_ESP + (0x176, 0, true), // SYSENTER_EIP + // Misc MSRs + (0xC000_0103, 0, true), // TSC_AUX + (0x277, PAT_RESET_VALUE, true), // PAT + (0x1D9, 0, true), // DEBUG_CTL + // MTRR default type + (0x2FF, 0, true), // MTRR_DEF_TYPE + // Variable-range MTRRs (8 base + 8 mask) + (0x200, 0, true), (0x201, 0, true), // MTRRphysBase0 / MTRRphysMask0 + (0x202, 0, true), (0x203, 0, true), // MTRRphysBase1 / MTRRphysMask1 + (0x204, 0, true), (0x205, 0, true), // MTRRphysBase2 / MTRRphysMask2 + (0x206, 0, true), (0x207, 0, true), // MTRRphysBase3 / MTRRphysMask3 + (0x208, 0, true), (0x209, 0, true), // MTRRphysBase4 / MTRRphysMask4 + (0x20A, 0, true), (0x20B, 0, true), // MTRRphysBase5 / MTRRphysMask5 + (0x20C, 0, true), (0x20D, 0, true), // MTRRphysBase6 / MTRRphysMask6 + (0x20E, 0, true), (0x20F, 0, true), // MTRRphysBase7 / MTRRphysMask7 + // Fixed-range MTRRs + (0x250, 0, true), // MTRRfix64K_00000 + (0x258, 0, true), // MTRRfix16K_80000 + (0x259, 0, true), // MTRRfix16K_A0000 + (0x268, 0, true), // MTRRfix4K_C0000 + (0x269, 0, true), // MTRRfix4K_C8000 + (0x26A, 0, true), // MTRRfix4K_D0000 + (0x26B, 0, true), // MTRRfix4K_D8000 + (0x26C, 0, true), // MTRRfix4K_E0000 + (0x26D, 0, true), // MTRRfix4K_E8000 + (0x26E, 0, true), // MTRRfix4K_F0000 + (0x26F, 0, true), // MTRRfix4K_F8000 + + // ── MSHV/WHP additional virtualizations ───────────────────────── + // These MSRs are handled internally by the Microsoft Hypervisor + // without generating VM exits, even when MSR intercepts are enabled. + // On KVM, the deny-all MSR filter traps them instead. + + // Read-only MSRs (no reset needed) + (0x17, 0, false), // IA32_PLATFORM_ID (read-only) + (0x8B, 0, false), // IA32_BIOS_SIGN_ID (read-only) + (0x10A, 0, false), // IA32_ARCH_CAPABILITIES (read-only) + (0x179, 0, false), // IA32_MCG_CAP (read-only) + (0x17A, 0, false), // IA32_MCG_STATUS (read-only in guest) + (0x4D0, 0, false), // Platform-specific (read-only in guest) + + // Speculative execution control + (0x48, 0, true), // IA32_SPEC_CTRL + + // CET (Control-flow Enforcement Technology) MSRs + (0x6A0, 0, true), // IA32_U_CET + (0x6A2, 0, true), // IA32_S_CET + (0x6A4, 0, true), // IA32_PL0_SSP + (0x6A5, 0, true), // IA32_PL1_SSP + (0x6A6, 0, true), // IA32_PL2_SSP + (0x6A7, 0, true), // IA32_PL3_SSP + (0x6A8, 0, true), // IA32_INTERRUPT_SSP_TABLE_ADDR + + // Extended supervisor state + (0xDA0, 0, true), // IA32_XSS + + // AMD-specific MSRs (read-only in guest context under MSHV) + (0xC001_0010, 0, false), // AMD SYSCFG + (0xC001_0114, 0, false), // AMD VM_CR + (0xC001_0131, 0, false), // AMD (platform-specific) +]; + +/// Returns `true` if the given MSR index is in [`VIRTUALIZED_MSRS`]. +/// +/// Used by tests to distinguish MSRs that are handled internally by the +/// hypervisor (and therefore won't generate VM exits) from those that +/// should be intercepted. +#[cfg(test)] +pub(crate) fn is_virtualized_msr(index: u32) -> bool { + VIRTUALIZED_MSRS.iter().any(|&(msr, _, _)| msr == index) +} + +/// Number of entries in [`MSRS_TO_RESET`], available at compile time so +/// backends can size const arrays. 
+pub(crate) const MSRS_TO_RESET_COUNT: usize = { + let mut n = 0; + let mut i = 0; + while i < VIRTUALIZED_MSRS.len() { + if VIRTUALIZED_MSRS[i].2 { + n += 1; + } + i += 1; + } + n +}; + +/// The subset of [`VIRTUALIZED_MSRS`] where `needs_explicit_reset` is `true`. +/// +/// These are guest-writable MSRs that are not restored by `set_sregs()` or +/// any other register-restore path and must be explicitly written back to +/// their default values during `reset_msrs()`. +pub(crate) const MSRS_TO_RESET: &[(u32, u64)] = &{ + let mut result = [(0u32, 0u64); MSRS_TO_RESET_COUNT]; + let mut j = 0; + let mut i = 0; + while i < VIRTUALIZED_MSRS.len() { + if VIRTUALIZED_MSRS[i].2 { + result[j] = (VIRTUALIZED_MSRS[i].0, VIRTUALIZED_MSRS[i].1); + j += 1; + } + i += 1; + } + result +}; + /// Minimum XSAVE buffer size: 512 bytes legacy region + 64 bytes header. /// Only used by MSHV and WHP which use compacted XSAVE format and need to /// validate buffer size before accessing XCOMP_BV. #[cfg(any(mshv3, target_os = "windows"))] pub(crate) const XSAVE_MIN_SIZE: usize = 576; +/// MSR index ranges covering all x86-64 MSR address spaces that the hardware +/// MSR bitmap can intercept. +/// +/// Used by tests to verify that every MSR is either intercepted or listed +/// in [`VIRTUALIZED_MSRS`]. Each entry is `(start_inclusive, end_exclusive)`. +/// +/// These ranges correspond exactly to the four MSR bitmap regions defined in +/// the hypervisor's `valx64.h` (`VAL_MSR_BITMAP_*_ADDRESS`), which in turn +/// match the hardware capabilities: +/// +/// - **Intel VMX** supports two bitmap regions: low (`0x0..0x1FFF`) and +/// high (`0xC000_0000..0xC000_1FFF`). +/// See Intel SDM Vol. 3C §25.6.9 "MSR-Bitmap Address". +/// +/// - **AMD SVM** supports three bitmap regions: low, high, and "very high" +/// (`0xC001_0000..0xC001_1FFF`). +/// See AMD APM Vol. 2 §15.11 "MSR Intercepts". +/// +/// - **Microsoft Hypervisor** adds a fourth synthetic region +/// (`0x4000_0000..0x4000_1FFF`) for Hyper-V MSRs including nested +/// virtualization SINTs up to `~0x4000_109F`. +/// See TLFS §3 +/// +/// Any MSR index outside these four regions cannot be intercepted via the +/// bitmap and will unconditionally #GP, so we don't need to test them. +/// +/// Additional reference for the specific MSR indices defined within each +/// range: +/// - **Linux kernel** `arch/x86/include/asm/msr-index.h` +/// +/// - **Intel SDM Vol. 4** "Model-Specific Registers" +/// - **AMD APM Vol. 2** Appendix A "MSR Cross-Reference" +#[cfg(test)] +pub(crate) const MSR_TEST_RANGES: &[(u32, u32)] = &[ + // Low bitmap: Intel architectural + model-specific (IA32_*) + // Includes: TSC, APIC_BASE, SYSENTER, MTRRs, PAT, DEBUGCTL, PMU, + // x2APIC (0x800–0x8FF), Intel RDT (0xC80–0xCFF), BNDCFGS (0xD90), + // XSS (0xDA0), LBR_INFO (0xDC0–0xDDF), Arch LBR (0x1500–0x1600), + // PMC V6 (0x1900–0x1983), HW Feedback (0x17D0). + // valx64.h: VAL_MSR_BITMAP_LOW 0x0000_0000..=0x0000_1FFF + (0x0000_0000, 0x0000_2000), + // Synthetic bitmap: Microsoft Hypervisor MSRs (HV_X64_MSR_*) + // Includes: GUEST_OS_ID (0x40000000), HYPERCALL, VP_INDEX, timers, + // SynIC (SCONTROL/SIEFP/SIMP/EOM/SINT0–15), crash MSRs (0x40000100–0x40000105), + // nested SINTs (0x40001080–0x4000109F), TSC_INVARIANT_CONTROL (0x40000118). + // valx64.h: VAL_MSR_BITMAP_SYNTHETIC 0x4000_0000..=0x4000_1FFF + (0x4000_0000, 0x4000_2000), + // High bitmap: AMD64 MSRs + // Includes: EFER, STAR, LSTAR, CSTAR, SFMASK, FS/GS/KERNEL_GS_BASE, + // TSC_AUX, perf global status (0xC000_0300+), MBA (0xC000_0200+). 
+ // valx64.h: VAL_MSR_BITMAP_HIGH 0xC000_0000..=0xC000_1FFF + (0xC000_0000, 0xC000_2000), + // Very-high bitmap: AMD K7/K8/Fam10h–19h MSRs + // Includes: HWCR, SYSCFG, SVM, IBS, CPPC, SEV/SNP, perfctr, UMC. + // valx64.h: VAL_MSR_BITMAP_VERY_HIGH 0xC001_0000..=0xC001_1FFF + (0xC001_0000, 0xC001_2000), +]; + /// Standard XSAVE buffer size (4KB) used by KVM and MSHV. /// WHP queries the required size dynamically. #[cfg(all(any(kvm, mshv3), test, feature = "init-paging"))] @@ -130,6 +339,10 @@ pub(crate) enum VmExit { MmioRead(u64), /// The vCPU tried to write to the given (unmapped) addr MmioWrite(u64), + /// The vCPU tried to read from the given MSR + MsrRead(u32), + /// The vCPU tried to write to the given MSR with the given value + MsrWrite { msr_index: u32, value: u64 }, /// The vCPU execution has been cancelled Cancelled(), /// The vCPU has exited for a reason that is not handled by Hyperlight @@ -170,6 +383,8 @@ pub enum CreateVmError { CreateVcpuFd(HypervisorError), #[error("VM creation failed: {0}")] CreateVmFd(HypervisorError), + #[error("Failed to enable MSR intercept: {0}")] + EnableMsrIntercept(HypervisorError), #[error("Hypervisor is not available: {0}")] HypervisorNotAvailable(HypervisorError), #[error("Initialize VM failed: {0}")] @@ -184,8 +399,10 @@ pub enum CreateVmError { /// RunVCPU error #[derive(Debug, Clone, thiserror::Error)] pub enum RunVcpuError { - #[error("Failed to decode message type: {0}")] + #[error("Failed to decode IO message type: {0}")] DecodeIOMessage(u32), + #[error("Failed to decode MSR message type: {0}")] + DecodeMsrMessage(u32), #[cfg(gdb)] #[error("Failed to get DR6 debug register: {0}")] GetDr6(HypervisorError), @@ -229,6 +446,10 @@ pub enum RegisterError { }, #[error("Invalid xsave alignment")] InvalidXsaveAlignment, + #[error("Failed to reset MSR 0x{index:X}: {source}")] + ResetMsr { index: u32, source: HypervisorError }, + #[error("Unknown MSR 0x{0:X}: no hypervisor register mapping")] + UnknownMsr(u32), #[cfg(target_os = "windows")] #[error("Failed to get xsave size: {0}")] GetXsaveSize(#[from] HypervisorError), @@ -344,6 +565,21 @@ pub(crate) trait VirtualMachine: Debug + Send { #[cfg(feature = "init-paging")] fn set_xsave(&self, xsave: &[u32]) -> std::result::Result<(), RegisterError>; + /// Reset internally-virtualized MSRs to their architectural defaults. + /// + /// The Microsoft hypervisor (MSHV/WHP) handles certain MSRs internally + /// without generating VM exits, even when MSR intercepts are enabled. + /// These must be explicitly written back on snapshot restore to prevent + /// state from one guest execution leaking into the next. + /// + /// On KVM this is a no-op because the MSR filter denies all guest MSR + /// access at the hardware level. + fn reset_msrs(&self) -> std::result::Result<(), RegisterError>; + + /// Enable MSR intercepts for this VM. When enabled, all MSR reads and + /// writes by the guest will cause a VM exit instead of being executed. 
+ fn enable_msr_intercept(&mut self) -> std::result::Result<(), CreateVmError>; + /// Get partition handle #[cfg(target_os = "windows")] fn partition_handle(&self) -> windows::Win32::System::Hypervisor::WHV_PARTITION_HANDLE; diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/mshv.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/mshv.rs index 74d7834ee..70a1cabc5 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/mshv.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/mshv.rs @@ -21,12 +21,16 @@ use std::sync::LazyLock; #[cfg(gdb)] use mshv_bindings::{DebugRegisters, hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT}; use mshv_bindings::{ - FloatingPointUnit, SpecialRegisters, StandardRegisters, XSave, hv_message_type, + FloatingPointUnit, HV_INTERCEPT_ACCESS_MASK_READ, HV_INTERCEPT_ACCESS_MASK_WRITE, + HV_INTERCEPT_ACCESS_READ, HV_INTERCEPT_ACCESS_WRITE, SpecialRegisters, StandardRegisters, + XSave, hv_intercept_type_HV_INTERCEPT_TYPE_X64_MSR, hv_message_type, hv_message_type_HVMSG_GPA_INTERCEPT, hv_message_type_HVMSG_UNMAPPED_GPA, hv_message_type_HVMSG_X64_HALT, hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT, + hv_message_type_HVMSG_X64_MSR_INTERCEPT, hv_partition_property_code_HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES, hv_partition_synthetic_processor_features, hv_register_assoc, - hv_register_name_HV_X64_REGISTER_RIP, hv_register_value, mshv_user_mem_region, + hv_register_name_HV_X64_REGISTER_RIP, hv_register_value, mshv_install_intercept, + mshv_user_mem_region, msr_to_hv_reg_name, }; use mshv_ioctls::{Mshv, VcpuFd, VmFd}; use tracing::{Span, instrument}; @@ -108,6 +112,18 @@ impl MshvVm { } impl VirtualMachine for MshvVm { + fn enable_msr_intercept(&mut self) -> std::result::Result<(), CreateVmError> { + let intercept = mshv_install_intercept { + access_type_mask: HV_INTERCEPT_ACCESS_MASK_WRITE | HV_INTERCEPT_ACCESS_MASK_READ, + intercept_type: hv_intercept_type_HV_INTERCEPT_TYPE_X64_MSR, + intercept_parameter: Default::default(), + }; + self.vm_fd + .install_intercept(intercept) + .map_err(|e| CreateVmError::EnableMsrIntercept(e.into()))?; + Ok(()) + } + unsafe fn map_memory( &mut self, (_slot, region): (u32, &MemoryRegion), @@ -137,6 +153,7 @@ impl VirtualMachine for MshvVm { hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT; const UNMAPPED_GPA_MESSAGE: hv_message_type = hv_message_type_HVMSG_UNMAPPED_GPA; const INVALID_GPA_ACCESS_MESSAGE: hv_message_type = hv_message_type_HVMSG_GPA_INTERCEPT; + const MSR_MESSAGE: hv_message_type = hv_message_type_HVMSG_X64_MSR_INTERCEPT; #[cfg(gdb)] const EXCEPTION_INTERCEPT: hv_message_type = hv_message_type_HVMSG_X64_EXCEPTION_INTERCEPT; @@ -196,6 +213,23 @@ impl VirtualMachine for MshvVm { _ => VmExit::Unknown("Unknown MMIO access".to_string()), } } + MSR_MESSAGE => { + let msr_message = m + .to_msr_info() + .map_err(|_| RunVcpuError::DecodeMsrMessage(m.header.message_type))?; + let edx = msr_message.rdx; + let eax = msr_message.rax; + let written_value = (edx << 32) | eax; + let access = msr_message.header.intercept_access_type as u32; + match access { + HV_INTERCEPT_ACCESS_READ => VmExit::MsrRead(msr_message.msr_number), + HV_INTERCEPT_ACCESS_WRITE => VmExit::MsrWrite { + msr_index: msr_message.msr_number, + value: written_value, + }, + _ => VmExit::Unknown(format!("Unknown MSR access type={}", access)), + } + } #[cfg(gdb)] EXCEPTION_INTERCEPT => { let ex_info = m @@ -354,6 +388,39 @@ impl VirtualMachine for MshvVm { .map_err(|e| RegisterError::SetXsave(e.into()))?; Ok(()) } + + fn reset_msrs(&self) -> 
std::result::Result<(), RegisterError> { + use mshv_bindings::{hv_register_name, hv_register_name_HV_X64_REGISTER_U_XSS}; + + use super::MSRS_TO_RESET; + + /// Extends `msr_to_hv_reg_name` with mappings missing from + /// mshv-bindings (e.g. IA32_XSS 0xDA0, whose constant is + /// incorrectly set to the HV register name value 0x8008B). + fn msr_to_hv_reg(msr_index: u32) -> Result<hv_register_name, RegisterError> { + msr_to_hv_reg_name(msr_index).or(match msr_index { + 0xDA0 => Ok(hv_register_name_HV_X64_REGISTER_U_XSS), + _ => Err(RegisterError::UnknownMsr(msr_index)), + }) + } + + for &(msr_index, value) in MSRS_TO_RESET { + let name = msr_to_hv_reg(msr_index)?; + let assoc = hv_register_assoc { + name, + value: hv_register_value { reg64: value }, + ..Default::default() + }; + self.vcpu_fd + .set_reg(&[assoc]) + .map_err(|e| RegisterError::ResetMsr { + index: msr_index, + source: e.into(), + })?; + } + + Ok(()) + } } #[cfg(gdb)] diff --git a/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs b/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs index 94f415b7e..f17bf9011 100644 --- a/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs +++ b/src/hyperlight_host/src/hypervisor/virtual_machine/whp.rs @@ -133,6 +133,23 @@ impl WhpVm { } impl VirtualMachine for WhpVm { + fn enable_msr_intercept(&mut self) -> std::result::Result<(), CreateVmError> { + let mut extended_exits_property = WHV_PARTITION_PROPERTY::default(); + // X64MsrExit bit position (bit 1) in WHV_EXTENDED_VM_EXITS + // See https://learn.microsoft.com/en-us/virtualization/api/hypervisor-platform/funcs/whvpartitionpropertydatatypes + extended_exits_property.ExtendedVmExits.AsUINT64 = 1 << 1; + unsafe { + WHvSetPartitionProperty( + self.partition, + WHvPartitionPropertyCodeExtendedVmExits, + &extended_exits_property as *const _ as *const _, + std::mem::size_of::<WHV_PARTITION_PROPERTY>() as _, + ) + .map_err(|e| CreateVmError::EnableMsrIntercept(e.into()))? + }; + Ok(()) + } + unsafe fn map_memory( &mut self, (_slot, region): (u32, &MemoryRegion), @@ -272,6 +289,21 @@ impl VirtualMachine for WhpVm { } // Execution was cancelled by the host. WHvRunVpExitReasonCanceled => VmExit::Cancelled(), + WHvRunVpExitReasonX64MsrAccess => { + let msr_access = unsafe { exit_context.Anonymous.MsrAccess }; + let eax = msr_access.Rax; + let edx = msr_access.Rdx; + let written_value = (edx << 32) | eax; + let access = unsafe { msr_access.AccessInfo.AsUINT32 }; + match access { + 0 => VmExit::MsrRead(msr_access.MsrNumber), + 1 => VmExit::MsrWrite { + msr_index: msr_access.MsrNumber, + value: written_value, + }, + _ => VmExit::Unknown(format!("Unknown MSR access type={}", access)), + } + } #[cfg(gdb)] WHvRunVpExitReasonException => { let exception = unsafe { exit_context.Anonymous.VpException }; @@ -627,6 +659,90 @@ impl VirtualMachine for WhpVm { Ok(()) } + fn reset_msrs(&self) -> std::result::Result<(), RegisterError> { + use super::MSRS_TO_RESET; + + /// Map an MSR index to its WHV_REGISTER_NAME. + /// + /// WHV register names are opaque sequential IDs assigned by the + /// hypervisor — there is no arithmetic relationship to MSR indices. + /// We use the named constants from the `windows` crate where + /// available; DebugCtl (0x207D) has no published constant and uses + /// the raw value from the hypervisor source (ValX64RegisterDebugCtl).
+ const fn msr_index_to_whv_name(msr_index: u32) -> WHV_REGISTER_NAME { + match msr_index { + 0x10 => WHvX64RegisterTsc, + 0x174 => WHvX64RegisterSysenterCs, + 0x175 => WHvX64RegisterSysenterEsp, + 0x176 => WHvX64RegisterSysenterEip, + 0x1D9 => WHV_REGISTER_NAME(0x207D), // DebugCtl (no windows crate constant) + 0x277 => WHvX64RegisterPat, + 0x2FF => WHvX64RegisterMsrMtrrDefType, + // Variable-range MTRRs + 0x200 => WHvX64RegisterMsrMtrrPhysBase0, + 0x201 => WHvX64RegisterMsrMtrrPhysMask0, + 0x202 => WHvX64RegisterMsrMtrrPhysBase1, + 0x203 => WHvX64RegisterMsrMtrrPhysMask1, + 0x204 => WHvX64RegisterMsrMtrrPhysBase2, + 0x205 => WHvX64RegisterMsrMtrrPhysMask2, + 0x206 => WHvX64RegisterMsrMtrrPhysBase3, + 0x207 => WHvX64RegisterMsrMtrrPhysMask3, + 0x208 => WHvX64RegisterMsrMtrrPhysBase4, + 0x209 => WHvX64RegisterMsrMtrrPhysMask4, + 0x20A => WHvX64RegisterMsrMtrrPhysBase5, + 0x20B => WHvX64RegisterMsrMtrrPhysMask5, + 0x20C => WHvX64RegisterMsrMtrrPhysBase6, + 0x20D => WHvX64RegisterMsrMtrrPhysMask6, + 0x20E => WHvX64RegisterMsrMtrrPhysBase7, + 0x20F => WHvX64RegisterMsrMtrrPhysMask7, + // Fixed-range MTRRs + 0x250 => WHvX64RegisterMsrMtrrFix64k00000, + 0x258 => WHvX64RegisterMsrMtrrFix16k80000, + 0x259 => WHvX64RegisterMsrMtrrFix16kA0000, + 0x268 => WHvX64RegisterMsrMtrrFix4kC0000, + 0x269 => WHvX64RegisterMsrMtrrFix4kC8000, + 0x26A => WHvX64RegisterMsrMtrrFix4kD0000, + 0x26B => WHvX64RegisterMsrMtrrFix4kD8000, + 0x26C => WHvX64RegisterMsrMtrrFix4kE0000, + 0x26D => WHvX64RegisterMsrMtrrFix4kE8000, + 0x26E => WHvX64RegisterMsrMtrrFix4kF0000, + 0x26F => WHvX64RegisterMsrMtrrFix4kF8000, + // SYSCALL MSRs + 0xC000_0081 => WHvX64RegisterStar, + 0xC000_0082 => WHvX64RegisterLstar, + 0xC000_0083 => WHvX64RegisterCstar, + 0xC000_0084 => WHvX64RegisterSfmask, + 0xC000_0102 => WHvX64RegisterKernelGsBase, + 0xC000_0103 => WHvX64RegisterTscAux, + // Feature-dependent MSRs + 0x48 => WHvX64RegisterSpecCtrl, + 0x6A0 => WHvX64RegisterUCet, + 0x6A2 => WHvX64RegisterSCet, + 0x6A4 => WHvX64RegisterPl0Ssp, + 0x6A5 => WHvX64RegisterPl1Ssp, + 0x6A6 => WHvX64RegisterPl2Ssp, + 0x6A7 => WHvX64RegisterPl3Ssp, + 0x6A8 => WHvX64RegisterInterruptSspTableAddr, + 0xDA0 => WHvX64RegisterXss, + _ => panic!("MSR index has no WHV register mapping"), + } + } + + for &(msr_index, value) in MSRS_TO_RESET { + let reg = ( + msr_index_to_whv_name(msr_index), + Align16(WHV_REGISTER_VALUE { Reg64: value }), + ); + self.set_registers(std::slice::from_ref(&reg)) + .map_err(|e| RegisterError::ResetMsr { + index: msr_index, + source: e.into(), + })?; + } + + Ok(()) + } + /// Get the partition handle for this VM fn partition_handle(&self) -> WHV_PARTITION_HANDLE { self.partition diff --git a/src/hyperlight_host/src/sandbox/config.rs b/src/hyperlight_host/src/sandbox/config.rs index 044c5f2fb..995d6ee36 100644 --- a/src/hyperlight_host/src/sandbox/config.rs +++ b/src/hyperlight_host/src/sandbox/config.rs @@ -74,6 +74,8 @@ pub struct SandboxConfiguration { interrupt_vcpu_sigrtmin_offset: u8, /// How much writable memory to offer the guest scratch_size: usize, + /// Allow MSR (Model Specific Register) access. This is disabled by default for security reasons. + allow_msr: bool, } impl SandboxConfiguration { @@ -118,6 +120,7 @@ impl SandboxConfiguration { guest_debug_info, #[cfg(crashdump)] guest_core_dump, + allow_msr: false, } } @@ -159,6 +162,27 @@ impl SandboxConfiguration { self.interrupt_vcpu_sigrtmin_offset } + /// Set whether MSR access is allowed. By default, MSR access is disabled + /// for security reasons.
+ /// + /// # Safety + /// + /// If enabled, the guest can read and write arbitrary MSRs, which may + /// expose host CPU state (performance counters, speculation control, etc.) + /// or cause unexpected side effects. With MSR access allowed, intercepts + /// are not installed and guest-modified MSRs are not reset across snapshot + /// restores, which can leak data between guest executions. + #[instrument(skip_all, parent = Span::current(), level= "Trace")] + pub unsafe fn set_allow_msr(&mut self, allow_msr: bool) { + self.allow_msr = allow_msr; + } + + /// Get whether MSR access is allowed + #[instrument(skip_all, parent = Span::current(), level= "Trace")] + pub(crate) fn get_allow_msr(&self) -> bool { + self.allow_msr + } + /// Sets the offset from `SIGRTMIN` to determine the real-time signal used for /// interrupting the VCPU thread. /// diff --git a/src/hyperlight_host/src/sandbox/initialized_multi_use.rs b/src/hyperlight_host/src/sandbox/initialized_multi_use.rs index 9ea3221d2..94c5d4675 100644 --- a/src/hyperlight_host/src/sandbox/initialized_multi_use.rs +++ b/src/hyperlight_host/src/sandbox/initialized_multi_use.rs @@ -1430,4 +1430,145 @@ mod tests { drop(sbox); } } + + #[test] + fn test_read_write_msr() { + use rand::seq::IteratorRandom; + + use crate::hypervisor::virtual_machine::{MSR_TEST_RANGES, is_virtualized_msr}; + + let value: u64 = 0x0; + const N: usize = 100; + + let msr_numbers = MSR_TEST_RANGES + .iter() + .flat_map(|&(start, end)| start..end) + .filter(|idx| !is_virtualized_msr(*idx)) + .sample(&mut rand::rng(), N); + + let mut sbox = UninitializedSandbox::new( + GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")), + None, + ) + .unwrap() + .evolve() + .unwrap(); + let snapshot = sbox.snapshot().unwrap(); + for msr_number in msr_numbers { + let result = sbox.call::<u64>("ReadMSR", msr_number); + match &result { + Ok(val) => panic!( + "Expected RDMSR to MSR 0x{:X} to be intercepted, but it succeeded with value 0x{:X}", + msr_number, val + ), + Err(err) => assert!( + matches!(err, + HyperlightError::MsrReadViolation(msr) if *msr == msr_number + ) || matches!(err, HyperlightError::GuestAborted(_, _)), + "RDMSR 0x{:X}: expected MsrReadViolation or GuestAborted, got: {:?}", + msr_number, + err + ), + } + sbox.restore(snapshot.clone()).unwrap(); + let result = sbox.call::<()>("WriteMSR", (msr_number, value)); + match &result { + Ok(_) => panic!( + "Expected WRMSR to MSR 0x{:X} to be intercepted, but it succeeded", + msr_number + ), + Err(err) => assert!( + matches!(err, + HyperlightError::MsrWriteViolation(msr_idx, v) if *msr_idx == msr_number && *v == value + ) || matches!(err, HyperlightError::GuestAborted(_, _)), + "WRMSR 0x{:X}: expected MsrWriteViolation or GuestAborted, got: {:?}", + msr_number, + err + ), + } + sbox.restore(snapshot.clone()).unwrap(); + } + + // Also try case where MSR access is allowed + let mut cfg = SandboxConfiguration::default(); + unsafe { cfg.set_allow_msr(true) }; + + let mut sbox = UninitializedSandbox::new( + GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")), + Some(cfg), + ) + .unwrap() + .evolve() + .unwrap(); + + let msr_index: u32 = 0xC0000102; // IA32_KERNEL_GS_BASE + let value: u64 = 0x5; + + sbox.call::<()>("WriteMSR", (msr_index, value)).unwrap(); + let read_value: u64 = sbox.call("ReadMSR", msr_index).unwrap(); + assert_eq!(read_value, value); + } + + /// Exhaustive test that every MSR index in the known architectural ranges + /// either triggers a trap (MsrReadViolation) OR is in
the canonical + /// [`VIRTUALIZED_MSRS`] list. This ensures the list stays in sync with + /// what the hypervisor actually virtualizes. + /// + /// The test iterates ALL indices — no random sampling — so that adding a + /// new virtualized MSR to the hypervisor without updating VIRTUALIZED_MSRS + /// will cause a deterministic failure. + #[test] + fn test_all_msr_indices_trapped_or_virtualized() { + use crate::hypervisor::virtual_machine::{MSR_TEST_RANGES, is_virtualized_msr}; + + let mut sbox = UninitializedSandbox::new( + GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")), + None, + ) + .unwrap() + .evolve() + .unwrap(); + let snapshot = sbox.snapshot().unwrap(); + + let all_msr_indices = MSR_TEST_RANGES.iter().flat_map(|&(start, end)| start..end); + + let mut untrapped_not_in_list = Vec::new(); + + for msr_index in all_msr_indices { + let result = sbox.call::<u64>("ReadMSR", msr_index); + match &result { + Ok(_) => { + // MSR read succeeded without trapping — it must be in + // the virtualized list, otherwise it's a gap. + if !is_virtualized_msr(msr_index) { + untrapped_not_in_list.push(msr_index); + } + } + Err(err) => { + // Expected: the MSR was trapped. Verify it's the right error. + assert!( + matches!( + err, + HyperlightError::MsrReadViolation(_) + | HyperlightError::GuestAborted(_, _) + ), + "MSR 0x{:X}: unexpected error variant: {:?}", + msr_index, + err, + ); + } + } + sbox.restore(snapshot.clone()).unwrap(); + } + + assert!( + untrapped_not_in_list.is_empty(), + "The following MSR(s) were NOT trapped and are NOT in VIRTUALIZED_MSRS. \ + They need to be added to VIRTUALIZED_MSRS in virtual_machine/mod.rs: {:?}", + untrapped_not_in_list + .iter() + .map(|idx| format!("0x{:X}", idx)) + .collect::<Vec<_>>() + ); + } } diff --git a/src/hyperlight_host/tests/integration_test.rs b/src/hyperlight_host/tests/integration_test.rs index 953c6b7e4..92f7f0508 100644 --- a/src/hyperlight_host/tests/integration_test.rs +++ b/src/hyperlight_host/tests/integration_test.rs @@ -665,6 +665,7 @@ fn guard_page_check_2() { }); } +#[ignore] #[test] fn execute_on_heap() { with_rust_sandbox(|mut sbox1| { diff --git a/src/tests/rust_guests/simpleguest/src/main.rs b/src/tests/rust_guests/simpleguest/src/main.rs index 80f734224..292586645 100644 --- a/src/tests/rust_guests/simpleguest/src/main.rs +++ b/src/tests/rust_guests/simpleguest/src/main.rs @@ -649,6 +649,36 @@ fn call_host_expect_error(hostfuncname: String) -> Result<()> { Ok(()) } +#[guest_function("ReadMSR")] +fn read_msr(msr: u32) -> u64 { + let (read_eax, read_edx): (u32, u32); + unsafe { + core::arch::asm!( + "rdmsr", + in("ecx") msr, + out("eax") read_eax, + out("edx") read_edx, + options(nostack, nomem) + ); + } + ((read_edx as u64) << 32) | (read_eax as u64) +} + +#[guest_function("WriteMSR")] +fn write_msr(msr: u32, value: u64) { + let eax = (value & 0xFFFFFFFF) as u32; + let edx = ((value >> 32) & 0xFFFFFFFF) as u32; + unsafe { + core::arch::asm!( + "wrmsr", + in("ecx") msr, + in("eax") eax, + in("edx") edx, + options(nostack, nomem) + ); + } +} + #[no_mangle] #[instrument(skip_all, parent = Span::current(), level= "Trace")] pub extern "C" fn hyperlight_main() {